UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testRefresh() throws Exception {
// Sanity check: setUp() wired MockCallQueue in as the NN call queue impl.
assertTrue("Mock queue should have been constructed",mockQueueConstructions > 0);
assertTrue("Puts are routed through MockQueue",canPutInMockQueue());
// Snapshot the construction count so we can detect a rebuild after refresh.
int lastMockQueueConstructions=mockQueueConstructions;
// "-refreshCallQueue" swaps the call queue back to the default implementation.
DFSAdmin admin=new DFSAdmin(config);
String[] args=new String[]{"-refreshCallQueue"};
int exitCode=admin.run(args);
assertEquals("DFSAdmin should return 0",0,exitCode);
// The refresh must not have constructed another MockCallQueue...
assertEquals("Mock queue should have no additional constructions",lastMockQueueConstructions,mockQueueConstructions);
try {
// ...and puts must now land in the replacement queue (LBQ) instead.
assertFalse("Puts are routed through LBQ instead of MockQueue",canPutInMockQueue());
}
catch ( IOException ioe) {
fail("Could not put into queue at all");
}
}
BranchVerifierTestInitializerUtilityVerifierHybridVerifier
@Before public void setUp() throws Exception {
// Reset the static counters used to instrument MockCallQueue.
mockQueueConstructions=0;
mockQueuePuts=0;
// Retry NameNode startup on a few random high ports, since another process
// may grab the chosen port between selection and bind.
int portRetries=5;
int nnPort;
for (; portRetries > 0; --portRetries) {
nnPort=30000 + rand.nextInt(30000);
config=new Configuration();
// Route this NN port's RPC call queue through MockCallQueue.
callQueueConfigKey="ipc." + nnPort + ".callqueue.impl";
config.setClass(callQueueConfigKey,MockCallQueue.class,BlockingQueue.class);
config.set("hadoop.security.authorization","true");
FileSystem.setDefaultUri(config,"hdfs://localhost:" + nnPort);
fs=FileSystem.get(config);
try {
cluster=new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
cluster.waitActive();
break;
}
catch ( BindException be) {
// Port collision: fall through and retry with a new random port.
}
}
// The loop only reaches 0 when every attempt failed to bind.
if (portRetries == 0) {
fail("Failed to pick an ephemeral port for the NameNode RPC server.");
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test HA failover, where BK, as the shared storage, fails.
 * Once it becomes available again, a standby can come up.
 * Verify that any write happening after the BK failure is not
 * available on the standby.
 */
@Test public void testFailoverWithFailingBKCluster() throws Exception {
// One extra bookie so the ensemble requires every bookie to be alive.
int ensembleSize=numBookies + 1;
BookieServer newBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
BookieServer replacementBookie=null;
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
BKJMUtil.addJournalManagerDefinition(conf);
// checkExitOnShutdown(false): the NN is expected to exit on journal loss.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).checkExitOnShutdown(false).build();
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
Path p1=new Path("/testBKJMFailingBKCluster1");
Path p2=new Path("/testBKJMFailingBKCluster2");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// p1 is written while the full ensemble is healthy.
fs.mkdirs(p1);
// Drop one bookie: the shared edits log can no longer be written.
newBookie.shutdown();
assertEquals("New bookie didn't stop",numBookies,bkutil.checkBookiesUp(numBookies,10));
try {
// p2 is attempted after the failure; losing the required journal makes
// the active NN exit, surfacing as an ExitException over RPC.
fs.mkdirs(p2);
fail("mkdirs should result in the NN exiting");
}
catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
}
cluster.shutdownNameNode(0);
try {
// The standby can't take over either while the ensemble is short.
cluster.transitionToActive(1);
fail("Shouldn't have been able to transition with bookies down");
}
catch ( ExitException ee) {
assertTrue("Should shutdown due to required journal failure",ee.getMessage().contains("starting log segment 3 failed for required journal"));
}
// Restore the ensemble: the standby can now become active, must see the
// pre-failure write p1, and must NOT see the failed write p2.
replacementBookie=bkutil.newBookie();
assertEquals("Replacement bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
cluster.transitionToActive(1);
assertTrue(fs.exists(p1));
assertFalse(fs.exists(p2));
}
finally {
newBookie.shutdown();
if (replacementBookie != null) {
replacementBookie.shutdown();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Test that two namenodes can't both continue as primary: after a
 * failover, a write on the old active must make it exit.
 */
@Test public void testMultiplePrimariesStarted() throws Exception {
Path p1=new Path("/testBKJMMultiplePrimary");
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailoverMultiple").toString());
BKJMUtil.addJournalManagerDefinition(conf);
// checkExitOnShutdown(false): the stale active is expected to call exit.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).checkExitOnShutdown(false).build();
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
fs.mkdirs(p1);
nn1.getRpcServer().rollEditLog();
// NN1 becomes active while NN0 still believes it is the primary.
cluster.transitionToActive(1);
fs=cluster.getFileSystem(0);
try {
// A write through the stale active must be fenced off by the shared
// journal and force that NN to exit.
fs.delete(p1,true);
fail("Log update on older active should cause it to exit");
}
catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Test that bkjm will refuse to open a stream on an empty
 * ledger, for either choice of first entry id (-1 or 0).
 */
@Test public void testEmptyInputStream() throws Exception {
ZooKeeper zk=BKJMUtil.connectZooKeeper();
BookKeeper bkc=new BookKeeper(new ClientConfiguration(),zk);
try {
// Create a ledger and close it immediately, so it contains no entries.
LedgerHandle lh=bkc.createLedger(BookKeeper.DigestType.CRC32,"foobar".getBytes());
lh.close();
EditLogLedgerMetadata metadata=new EditLogLedgerMetadata("/foobar",HdfsConstants.NAMENODE_LAYOUT_VERSION,lh.getId(),0x1234);
try {
// A first entry id of -1 is invalid for an empty ledger.
new BookKeeperEditLogInputStream(lh,metadata,-1);
fail("Shouldn't get this far, should have thrown");
}
catch ( IOException ioe) {
assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
}
metadata=new EditLogLedgerMetadata("/foobar",HdfsConstants.NAMENODE_LAYOUT_VERSION,lh.getId(),0x1234);
try {
// Entry 0 is equally invalid: the ledger holds no entries at all.
new BookKeeperEditLogInputStream(lh,metadata,0);
fail("Shouldn't get this far, should have thrown");
}
catch ( IOException ioe) {
assertTrue(ioe.getMessage().contains("Invalid first bk entry to read"));
}
}
finally {
bkc.close();
zk.close();
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that if enough bookies fail to prevent an ensemble,
 * writes to bookkeeper will fail. Test that when once again
 * an ensemble is available, it can continue to write.
 */
@Test public void testAllBookieFailure() throws Exception {
// One extra bookie so the ensemble needs every bookie to be up.
BookieServer bookieToFail=bkutil.newBookie();
BookieServer replacementBookie=null;
try {
int ensembleSize=numBookies + 1;
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
Configuration conf=new Configuration();
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
long txid=1;
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),nsi);
bkjm.format(nsi);
EditLogOutputStream out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Writes succeed while the full ensemble is available.
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
// Kill one bookie; the required ensemble can no longer be formed.
bookieToFail.shutdown();
assertEquals("New bookie didn't die",numBookies,bkutil.checkBookiesUp(numBookies,10));
try {
// Further writes must now fail at flush time.
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
fail("should not get to this stage");
}
catch ( IOException ioe) {
LOG.debug("Error writing to bookkeeper",ioe);
assertTrue("Invalid exception message",ioe.getMessage().contains("Failed to write to bookkeeper"));
}
// Bring a replacement bookie up; after recovering unfinalized segments
// and starting a fresh one, writing must work again.
replacementBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",numBookies + 1,bkutil.checkBookiesUp(numBookies + 1,10));
bkjm.recoverUnfinalizedSegments();
out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
}
catch ( Exception e) {
// Log before rethrowing so the failure is visible in the test output.
LOG.error("Exception in test",e);
throw e;
}
finally {
if (replacementBookie != null) {
replacementBookie.shutdown();
}
bookieToFail.shutdown();
if (bkutil.checkBookiesUp(numBookies,30) != numBookies) {
LOG.warn("Not all bookies from this test shut down, expect errors");
}
}
}
IterativeVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
@Test public void testNumberOfTransactionsWithInprogressAtEnd() throws Exception {
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"),nsi);
bkjm.format(nsi);
long txid=1;
// Write three complete, finalized segments of DEFAULT_SEGMENT_SIZE txns.
for (long i=0; i < 3; i++) {
long start=txid;
EditLogOutputStream out=bkjm.startLogSegment(start,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long j=1; j <= DEFAULT_SEGMENT_SIZE; j++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.close();
bkjm.finalizeLogSegment(start,(txid - 1));
assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(start,(txid - 1)),false));
}
// Leave a half-written segment inprogress at the end of the journal:
// flushed, then aborted and closed, never finalized.
long start=txid;
EditLogOutputStream out=bkjm.startLogSegment(start,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long j=1; j <= DEFAULT_SEGMENT_SIZE / 2; j++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
out.abort();
out.close();
// With inProgressOk=true the count must include the inprogress txns.
long numTrans=bkjm.getNumberOfTransactions(1,true);
assertEquals((txid - 1),numTrans);
}
IterativeVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
@Test public void testNumberOfTransactionsWithGaps() throws Exception {
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-gaps"),nsi);
bkjm.format(nsi);
long txid=1;
// Write three finalized segments of DEFAULT_SEGMENT_SIZE txns each.
for (long i=0; i < 3; i++) {
long start=txid;
EditLogOutputStream out=bkjm.startLogSegment(start,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long j=1; j <= DEFAULT_SEGMENT_SIZE; j++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.close();
bkjm.finalizeLogSegment(start,txid - 1);
assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(start,txid - 1),false));
}
// Delete the middle segment's znode to create a gap in the journal.
zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE + 1,DEFAULT_SEGMENT_SIZE * 2),-1);
// Counting from the start stops at the gap: only segment one is counted.
long numTrans=bkjm.getNumberOfTransactions(1,true);
assertEquals(DEFAULT_SEGMENT_SIZE,numTrans);
try {
// Counting from inside the gap must report corruption.
numTrans=bkjm.getNumberOfTransactions(DEFAULT_SEGMENT_SIZE + 1,true);
fail("Should have thrown corruption exception by this point");
}
catch ( JournalManager.CorruptionException ce) {
// expected: the segment starting at this txid was deleted
}
// Counting from after the gap sees the final intact segment.
numTrans=bkjm.getNumberOfTransactions((DEFAULT_SEGMENT_SIZE * 2) + 1,true);
assertEquals(DEFAULT_SEGMENT_SIZE,numTrans);
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * If a journal manager has a corrupt inprogress node, ensure that we throw
 * an error, as this should not be possible, and some third party has
 * corrupted the zookeeper state.
 */
@Test public void testCorruptInprogressNode() throws Exception {
  URI uri=BKJMUtil.createJournalURI("/hdfsjournal-corruptInprogress");
  NamespaceInfo nsi=newNSInfo();
  BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  bkjm.format(nsi);
  // Write and finalize a healthy first segment (txids 1..100).
  EditLogOutputStream out=bkjm.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i=1; i <= 100; i++) {
    FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1,100);
  // Leave a second segment inprogress, then corrupt its znode out-of-band.
  out=bkjm.startLogSegment(101,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  out.close();
  bkjm.close();
  String inprogressZNode=bkjm.inprogressZNode(101);
  zkc.setData(inprogressZNode,"WholeLottaJunk".getBytes(),-1);
  // Recovery with a fresh journal manager must reject the unparseable
  // inprogress metadata with an IOException.
  bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  try {
    bkjm.recoverUnfinalizedSegments();
    fail("Should have failed. There should be no way of creating" + " an empty inprogress znode");
  }
  catch ( IOException e) {
    assertTrue("Exception different than expected",e.getMessage().contains("has no field named"));
  }
  finally {
    bkjm.close();
  }
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * If a journal manager has an empty inprogress node, ensure that we throw an
 * error, as this should not be possible, and some third party has corrupted
 * the zookeeper state.
 */
@Test public void testEmptyInprogressNode() throws Exception {
  URI uri=BKJMUtil.createJournalURI("/hdfsjournal-emptyInprogress");
  NamespaceInfo nsi=newNSInfo();
  BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  bkjm.format(nsi);
  // Write and finalize a healthy first segment (txids 1..100).
  EditLogOutputStream out=bkjm.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i=1; i <= 100; i++) {
    FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1,100);
  // Leave a second segment inprogress, then blank its znode out-of-band.
  out=bkjm.startLogSegment(101,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  out.close();
  bkjm.close();
  String inprogressZNode=bkjm.inprogressZNode(101);
  zkc.setData(inprogressZNode,new byte[0],-1);
  // Recovery with a fresh journal manager must reject the empty metadata.
  bkjm=new BookKeeperJournalManager(conf,uri,nsi);
  try {
    bkjm.recoverUnfinalizedSegments();
    fail("Should have failed. There should be no way of creating" + " an empty inprogress znode");
  }
  catch ( IOException e) {
    assertTrue("Exception different than expected",e.getMessage().contains("Invalid/Incomplete data in znode"));
  }
  finally {
    bkjm.close();
  }
}
UtilityVerifierInternalCallVerifierNullVerifierHybridVerifier
/**
 * Create a bkjm namespace, write a journal from txid 1, close stream.
 * Try to create a new journal from txid 1 (and from a txid inside the
 * finalized segment); both should throw. Writing may only resume at the
 * txid just past the finalized segment, or at any later txid.
 */
@Test public void testWriteRestartFrom1() throws Exception {
  NamespaceInfo nsi=newNSInfo();
  BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"),nsi);
  bkjm.format(nsi);
  // Write and finalize a segment covering txids 1..DEFAULT_SEGMENT_SIZE.
  long txid=1;
  long start=txid;
  EditLogOutputStream out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long j=1; j <= DEFAULT_SEGMENT_SIZE; j++) {
    FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(txid++);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(start,(txid - 1));
  // Restarting from txid 1 must fail: that range is already written.
  // (Narrowed from catch(Exception) to catch(IOException) to match the
  // parallel case below and avoid masking unexpected runtime failures.)
  txid=1;
  try {
    out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    fail("Shouldn't be able to start another journal from " + txid + " when one already exists");
  }
  catch ( IOException ioe) {
    LOG.info("Caught exception as expected",ioe);
  }
  // Restarting from a txid inside the finalized segment must also fail.
  txid=DEFAULT_SEGMENT_SIZE;
  try {
    out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    fail("Shouldn't be able to start another journal from " + txid + " when one already exists");
  }
  catch ( IOException ioe) {
    LOG.info("Caught exception as expected",ioe);
  }
  // Resuming exactly after the finalized segment is legal.
  txid=DEFAULT_SEGMENT_SIZE + 1;
  start=txid;
  out=bkjm.startLogSegment(start,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  assertNotNull(out);
  for (long j=1; j <= DEFAULT_SEGMENT_SIZE; j++) {
    FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(txid++);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(start,(txid - 1));
  // Starting a segment beyond the last written txid (leaving a gap in the
  // txid space) is also accepted by the journal manager.
  txid=DEFAULT_SEGMENT_SIZE * 4;
  out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  assertNotNull(out);
}
InternalCallVerifierEqualityVerifierExceptionVerifierHybridVerifier
/**
 * Tests that update() should throw an IOException if the znode's version
 * number changed (here, by a competing update) between read and update.
 */
@Test(expected=IOException.class) public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead() throws Exception {
CurrentInprogress ci=new CurrentInprogress(zkc,CURRENT_NODE_PATH);
ci.init();
ci.update("myInprogressZnode");
assertEquals("Not returning myInprogressZnode","myInprogressZnode",ci.read());
// This update bumps the znode version past the one captured by read();
// presumably the next update checks that stale version — confirm against
// CurrentInprogress.update() — and must therefore throw IOException.
ci.update("YourInprogressZnode");
ci.update("myInprogressZnode");
}
BranchVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
@Test public void testGenerateEncryptedKey() throws Exception {
  // Generate an EEK and sanity-check its metadata and material length.
  KeyProviderCryptoExtension.EncryptedKeyVersion ek1=kpExt.generateEncryptedKey(encryptionKey.getName());
  assertEquals("Version name of EEK should be EEK",KeyProviderCryptoExtension.EEK,ek1.getEncryptedKeyVersion().getVersionName());
  assertEquals("Name of EEK should be encryption key name",ENCRYPTION_KEY_NAME,ek1.getEncryptionKeyName());
  assertNotNull("Expected encrypted key material",ek1.getEncryptedKeyVersion().getMaterial());
  assertEquals("Length of encryption key material and EEK material should " + "be the same",encryptionKey.getMaterial().length,ek1.getEncryptedKeyVersion().getMaterial().length);
  // Decrypt the EEK back into key material.
  KeyVersion k1=kpExt.decryptEncryptedKey(ek1);
  assertEquals(KeyProviderCryptoExtension.EK,k1.getVersionName());
  assertEquals(encryptionKey.getMaterial().length,k1.getMaterial().length);
  // k1 is the *decrypted* material, so the message must say so
  // (original message wrongly said "Encrypted key material").
  if (Arrays.equals(k1.getMaterial(),encryptionKey.getMaterial())) {
    fail("Decrypted key material should not equal encryption key material");
  }
  // This compares the *encrypted* material against the encryption key
  // (original message wrongly referenced "decrypted key material").
  if (Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),encryptionKey.getMaterial())) {
    fail("Encrypted key material should not equal encryption key material");
  }
  // Decrypting the same EEK twice must yield identical material.
  KeyVersion k1a=kpExt.decryptEncryptedKey(ek1);
  assertArrayEquals(k1.getMaterial(),k1a.getMaterial());
  // Two independently generated EEKs must differ in material and IV.
  KeyProviderCryptoExtension.EncryptedKeyVersion ek2=kpExt.generateEncryptedKey(encryptionKey.getName());
  KeyVersion k2=kpExt.decryptEncryptedKey(ek2);
  if (Arrays.equals(k1.getMaterial(),k2.getMaterial())) {
    fail("Generated EEKs should have different material!");
  }
  if (Arrays.equals(ek1.getEncryptedKeyIv(),ek2.getEncryptedKeyIv())) {
    fail("Generated EEKs should have different IVs!");
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
@Test public void testJksProvider() throws Exception {
  Configuration conf=new Configuration();
  final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks";
  File file=new File(tmpDir,"test.jks");
  file.delete();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,ourUrl);
  checkSpecificProvider(conf,ourUrl);
  // The keystore file must be created owner-only (rwx------).
  Path path=ProviderUtils.unnestUri(new URI(ourUrl));
  FileSystem fs=path.getFileSystem(conf);
  FileStatus s=fs.getFileStatus(path);
  assertEquals("Keystore permissions should be owner-only","rwx------",s.getPermission().toString());
  assertTrue(file + " should exist",file.isFile());
  // Simulate a crash that left a valid _OLD backup and an empty current
  // file: loading must recover from _OLD and then remove it.
  File oldFile=new File(file.getPath() + "_OLD");
  file.renameTo(oldFile);
  file.delete();
  file.createNewFile();
  assertTrue(oldFile.exists());
  KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
  assertTrue(file.exists());
  assertTrue(oldFile + " should be deleted",!oldFile.exists());
  verifyAfterReload(file,provider);
  assertTrue(!oldFile.exists());
  // A _NEW file coexisting with the current file is an inconsistent state
  // and must be rejected.
  File newFile=new File(file.getPath() + "_NEW");
  newFile.createNewFile();
  try {
    provider=KeyProviderFactory.getProviders(conf).get(0);
    Assert.fail("_NEW and current file should not exist together !!");
  }
  catch ( Exception e) {
    // expected: inconsistent keystore state
  }
  finally {
    if (newFile.exists()) {
      newFile.delete();
    }
  }
  // Only _NEW present: the provider must promote it to the current file.
  file.renameTo(newFile);
  file.delete();
  try {
    provider=KeyProviderFactory.getProviders(conf).get(0);
    Assert.assertFalse(newFile.exists());
    Assert.assertFalse(oldFile.exists());
  }
  catch ( Exception e) {
    Assert.fail("JKS should load from _NEW file !!");
  }
  verifyAfterReload(file,provider);
  // _NEW empty and _OLD valid: the provider must fall back to _OLD.
  newFile.createNewFile();
  file.renameTo(oldFile);
  file.delete();
  try {
    provider=KeyProviderFactory.getProviders(conf).get(0);
    Assert.assertFalse(newFile.exists());
    Assert.assertFalse(oldFile.exists());
  }
  catch ( Exception e) {
    Assert.fail("JKS should load from _OLD file !!");
  }
  finally {
    if (newFile.exists()) {
      newFile.delete();
    }
  }
  verifyAfterReload(file,provider);
  // Widened permissions must be retained across a provider flush.
  fs.setPermission(path,new FsPermission("777"));
  checkPermissionRetention(conf,ourUrl,path);
}
BooleanVerifierEqualityVerifierHybridVerifier
@Test public void testUriErrors() throws Exception {
  Configuration conf=new Configuration();
  // The '@' makes this provider path an invalid URI.
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,"unkn@own:/x/y");
  try {
    // Typed List (was a raw type); getProviders must throw before returning.
    List<KeyProvider> providers=KeyProviderFactory.getProviders(conf);
    Assert.fail("should throw!");
  }
  catch ( IOException e) {
    assertEquals("Bad configuration of " + KeyProviderFactory.KEY_PROVIDER_PATH + " at unkn@own:/x/y",e.getMessage());
  }
}
BooleanVerifierEqualityVerifierHybridVerifier
@Test public void testFactoryErrors() throws Exception {
  Configuration conf=new Configuration();
  // A syntactically valid URI whose scheme has no registered factory.
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,"unknown:///");
  try {
    // Typed List (was a raw type); getProviders must throw before returning.
    List<KeyProvider> providers=KeyProviderFactory.getProviders(conf);
    Assert.fail("should throw!");
  }
  catch ( IOException e) {
    assertEquals("No KeyProviderFactory for unknown:/// in " + KeyProviderFactory.KEY_PROVIDER_PATH,e.getMessage());
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierNullVerifierHybridVerifier
@Test public void testJksProviderPasswordViaConfig() throws Exception {
Configuration conf=new Configuration();
final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks";
File file=new File(tmpDir,"test.jks");
file.delete();
try {
// Create the keystore using a password file resource — presumably
// resolved from the classpath; confirm against JavaKeyStoreProvider.
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,ourUrl);
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"javakeystoreprovider.password");
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
provider.createKey("key3",new byte[16],KeyProvider.options(conf));
provider.flush();
}
catch ( Exception ex) {
Assert.fail("could not create keystore with password file");
}
// Reopening with the same password file must find the stored key.
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.assertNotNull(provider.getCurrentKey("key3"));
try {
// A password file resource that does not exist must fail to open.
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"bar");
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("using non existing password file, it should fail");
}
catch ( IOException ex) {
// expected
}
try {
// A resource that exists but holds the wrong password must also fail.
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"core-site.xml");
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("using different password file, it should fail");
}
catch ( IOException ex) {
// expected
}
try {
// With neither the property nor the env var set, opening must fail.
conf.unset(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY);
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("No password file property, env not set, it should fail");
}
catch ( IOException ex) {
// expected
}
}
Class: org.apache.hadoop.crypto.key.TestKeyShell
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testInvalidProvider() throws Exception {
  // A provider URI with an unknown scheme must be rejected: non-zero exit
  // code and a "no valid KeyProviders" diagnostic on the captured output.
  final KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs={"create","key1","-cipher","AES","-provider","sdff://file/tmp/keystore.jceks"};
  final int exitCode=shell.run(createArgs);
  assertEquals(1,exitCode);
  assertTrue(outContent.toString().contains("There are no valid KeyProviders configured."));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testInvalidKeySize() throws Exception {
  // 56 bits is not a valid size for the default cipher, so the create
  // command must fail and report that the key was not created.
  final KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs={"create","key1","-size","56","-provider",jceksProvider};
  final int exitCode=shell.run(createArgs);
  assertEquals(1,exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testTransientProviderOnlyConfig() throws Exception {
  // When the only configured provider is the transient user:/// provider,
  // key creation must fail with the "no valid KeyProviders" diagnostic.
  final Configuration config=new Configuration();
  config.set(KeyProviderFactory.KEY_PROVIDER_PATH,"user:///");
  final KeyShell shell=new KeyShell();
  shell.setConf(config);
  final String[] createArgs={"create","key1"};
  final int exitCode=shell.run(createArgs);
  assertEquals(1,exitCode);
  assertTrue(outContent.toString().contains("There are no valid KeyProviders configured."));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testKeySuccessfulKeyLifecycle() throws Exception {
  final String keyName="key1";
  final KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  outContent.reset();
  // Create the key and verify the success message.
  final String[] createArgs={"create",keyName,"-provider",jceksProvider};
  int exitCode=shell.run(createArgs);
  assertEquals(0,exitCode);
  assertTrue(outContent.toString().contains(keyName + " has been successfully created"));
  // The key shows up in both terse and verbose (metadata) listings.
  String listing=listKeys(shell,false);
  assertTrue(listing.contains(keyName));
  listing=listKeys(shell,true);
  assertTrue(listing.contains(keyName));
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("created"));
  outContent.reset();
  // Roll the key and verify the success message.
  final String[] rollArgs={"roll",keyName,"-provider",jceksProvider};
  exitCode=shell.run(rollArgs);
  assertEquals(0,exitCode);
  assertTrue(outContent.toString().contains("key1 has been successfully rolled."));
  // Delete it; it must no longer appear in the listing.
  deleteKey(shell,keyName);
  listing=listKeys(shell,false);
  assertFalse(listing,listing.contains(keyName));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testAttributes() throws Exception {
int rc;
KeyShell ks=new KeyShell();
ks.setConf(new Configuration());
// A well-formed attribute (name=value) is accepted and listed verbosely.
final String[] args1={"create","keyattr1","-provider",jceksProvider,"-attr","foo=bar"};
rc=ks.run(args1);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("keyattr1 has been " + "successfully created"));
String listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr1"));
assertTrue(listOut.contains("attributes: [foo=bar]"));
outContent.reset();
// Malformed attributes are rejected. args2[5] is the -attr value slot;
// it is rewritten in place for each variant below.
final String[] args2={"create","keyattr2","-provider",jceksProvider,"-attr","=bar"};
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// Missing '=' separator.
args2[5]="foo";
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// Separator with neither name nor value.
args2[5]="=";
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// "a=b=c" is legal: the name is "a", the value is "b=c".
args2[5]="a=b=c";
rc=ks.run(args2);
assertEquals(0,rc);
listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr2"));
assertTrue(listOut.contains("attributes: [a=b=c]"));
outContent.reset();
// Whitespace around names/values is trimmed; multiple -attr flags stack.
final String[] args3={"create","keyattr3","-provider",jceksProvider,"-attr","foo = bar","-attr"," glarch =baz ","-attr","abc=def"};
rc=ks.run(args3);
assertEquals(0,rc);
listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr3"));
assertTrue(listOut.contains("[foo=bar]"));
assertTrue(listOut.contains("[glarch=baz]"));
assertTrue(listOut.contains("[abc=def]"));
outContent.reset();
// Duplicate attribute names within one create are rejected.
final String[] args4={"create","keyattr4","-provider",jceksProvider,"-attr","foo=bar","-attr","foo=glarch"};
rc=ks.run(args4);
assertEquals(1,rc);
// Clean up the keys that were successfully created.
deleteKey(ks,"keyattr1");
deleteKey(ks,"keyattr2");
deleteKey(ks,"keyattr3");
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testInvalidCipher() throws Exception {
  // "LJM" is not a recognized cipher, so key creation must fail and the
  // shell must report that the key was not created.
  final KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs={"create","key1","-cipher","LJM","-provider",jceksProvider};
  final int exitCode=shell.run(createArgs);
  assertEquals(1,exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testKeySuccessfulCreationWithDescription() throws Exception {
  outContent.reset();
  // Create a key carrying a -description argument.
  final KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs={"create","key1","-provider",jceksProvider,"-description","someDescription"};
  final int exitCode=shell.run(createArgs);
  assertEquals(0,exitCode);
  assertTrue(outContent.toString().contains("key1 has been successfully created"));
  // The verbose listing must surface the description text.
  final String listing=listKeys(shell,true);
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("someDescription"));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testTransientProviderWarning() throws Exception {
  // Creating a key in the transient user:/// provider succeeds (exit 0)
  // but must emit a warning about the provider being transient.
  final KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs={"create","key1","-cipher","AES","-provider","user:///"};
  final int exitCode=shell.run(createArgs);
  assertEquals(0,exitCode);
  assertTrue(outContent.toString().contains("WARNING: you are modifying a transient provider."));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testFullCipher() throws Exception {
  final String keyName="key1";
  // A fully-qualified cipher spec (algorithm/mode/padding) is accepted.
  final KeyShell shell=new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs={"create",keyName,"-cipher","AES/CBC/pkcs5Padding","-provider",jceksProvider};
  final int exitCode=shell.run(createArgs);
  assertEquals(0,exitCode);
  assertTrue(outContent.toString().contains(keyName + " has been successfully created"));
  // Remove the key so later tests start from a clean keystore.
  deleteKey(shell,keyName);
}
APIUtilityVerifierBranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
@Test public void testUnsupportedSymlink() throws IOException {
Path file=getTestRootPath(fc,"file");
Path link=getTestRootPath(fc,"linkToFile");
// Only exercised when the underlying file system lacks symlink support;
// otherwise this test is a silent no-op.
if (!fc.getDefaultFileSystem().supportsSymlinks()) {
try {
fc.createSymlink(file,link,false);
Assert.fail("Created a symlink on a file system that " + "does not support symlinks.");
}
catch ( IOException e) {
// expected: createSymlink must reject the request
}
createFile(file);
try {
fc.getLinkTarget(file);
Assert.fail("Got a link target on a file system that " + "does not support symlinks.");
}
catch ( IOException e) {
// expected: getLinkTarget must fail when symlinks are unsupported
}
// Without symlink support, link status equals the regular file status.
Assert.assertEquals(fc.getFileStatus(file),fc.getFileLinkStatus(file));
}
}
UtilityVerifierBooleanVerifierHybridVerifier
@Test public void testRenameFileToNonExistentDirectory() throws Exception {
// Skip silently when the file system under test lacks rename support.
if (!renameSupported()) return;
Path src=getTestRootPath(fc,"test/hadoop/file");
createFile(src);
Path dst=getTestRootPath(fc,"test/nonExistent/newfile");
// Renaming into a missing parent directory must fail with a (possibly
// wrapped) FileNotFoundException, both without and with OVERWRITE.
try {
rename(src,dst,false,true,false,Rename.NONE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
try {
rename(src,dst,false,true,false,Rename.OVERWRITE);
Assert.fail("Expected exception was not thrown");
}
catch ( IOException e) {
Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
}
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
@Test public void testFsWithMyOwnAndChildTokens() throws Exception {
  Credentials credentials=new Credentials();
  Text service1=new Text("singleTokenFs1");
  Text service2=new Text("singleTokenFs2");
  Text myService=new Text("multiTokenFs");
  // Fixed garbled declaration ("Token>"): the mock needs a wildcard type.
  Token<?> token=mock(Token.class);
  // Pre-load a token for service2 so fs2 should NOT fetch a new one.
  credentials.addToken(service2,token);
  MockFileSystem fs1=createFileSystemForServiceName(service1);
  MockFileSystem fs2=createFileSystemForServiceName(service2);
  MockFileSystem multiFs=createFileSystemForServiceName(myService,fs1,fs2);
  multiFs.addDelegationTokens(renewer,credentials);
  // The parent fs and fs1 fetch tokens; fs2's token already existed.
  verifyTokenFetch(multiFs,true);
  verifyTokenFetch(fs1,true);
  verifyTokenFetch(fs2,false);
  // Three tokens total: myService, service1, and the pre-added service2.
  assertEquals(3,credentials.numberOfTokens());
  assertNotNull(credentials.getToken(myService));
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
}
Class: org.apache.hadoop.fs.TestFileUtil
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=30000) public void testListAPI() throws IOException {
setupDirs();
// "partitioned" is a fixture from setupDirs(), expected to hold 2 files.
String[] files=FileUtil.list(partitioned);
Assert.assertEquals("Unexpected number of pre-existing files",2,files.length);
// A freshly created directory must list as empty (length 0, not null).
File newDir=new File(tmp.getPath(),"test");
newDir.mkdir();
Assert.assertTrue("Failed to create test dir",newDir.exists());
files=FileUtil.list(newDir);
Assert.assertEquals("New directory unexpectedly contains files",0,files.length);
newDir.delete();
Assert.assertFalse("Failed to delete test dir",newDir.exists());
try {
// Listing a non-existent directory must throw rather than return null.
files=FileUtil.list(newDir);
Assert.fail("IOException expected on list() for non-existent dir " + newDir.toString());
}
catch ( IOException ioe) {
// expected
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests if fullyDelete deletes
 * (a) symlink to file only and not the file pointed to by symlink.
 * (b) symlink to dir only and not the dir pointed to by symlink.
 * @throws IOException
 */
@Test(timeout=30000) public void testFullyDeleteSymlinks() throws IOException {
setupDirs();
// "del" is a fixture from setupDirs() with 5 entries, including the file
// symlink LINK and a directory symlink "tmpDir" — presumably pointing
// into tmp; confirm against setupDirs().
File link=new File(del,LINK);
Assert.assertEquals(5,del.list().length);
// Deleting the file symlink removes only the link itself...
boolean ret=FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
Assert.assertFalse(link.exists());
Assert.assertEquals(4,del.list().length);
// ...leaving the link target's directory contents intact.
validateTmpDir();
// Same for a symlink to a directory: only the link is removed.
File linkDir=new File(del,"tmpDir");
ret=FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
Assert.assertFalse(linkDir.exists());
Assert.assertEquals(3,del.list().length);
validateTmpDir();
}
BranchVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that length() on a symlink follows the link: it reports the
 * target's size while the target exists, and 0 once the target is gone
 * (except on pre-Java7 Windows, where the "symlink" is a real copy).
 */
@Test(timeout=30000) public void testSymlinkLength() throws Exception {
Assert.assertFalse(del.exists());
del.mkdirs();
byte[] data="testSymLinkData".getBytes();
File file=new File(del,FILE);
File link=new File(del,"_link");
// Write the target file.
FileOutputStream os=new FileOutputStream(file);
os.write(data);
os.close();
// Before the link is created, its length is 0.
Assert.assertEquals(0,link.length());
// After linking, the link reports the target's length.
FileUtil.symLink(file.getAbsolutePath(),link.getAbsolutePath());
Assert.assertEquals(data.length,file.length());
Assert.assertEquals(data.length,link.length());
// Delete the target: a true symlink now dangles and reports length 0.
file.delete();
Assert.assertFalse(file.exists());
if (Shell.WINDOWS && !Shell.isJava7OrAbove()) {
// Pre-Java7 Windows has no real symlinks; the copy keeps its size.
Assert.assertEquals(data.length,link.length());
}
else {
Assert.assertEquals(0,link.length());
}
link.delete();
Assert.assertFalse(link.exists());
}
APIUtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test that getDU is able to handle cycles caused due to symbolic links
 * and that directory sizes are not added to the final calculated size
 * @throws IOException
 */
@Test(timeout=30000) public void testGetDU() throws Exception {
  setupDirs();
  long du=FileUtil.getDU(TEST_DIR);
  // Two 3-byte payloads, each followed by a platform line separator.
  final long expected=2 * (3 + System.getProperty("line.separator").length());
  Assert.assertEquals(expected,du);
  // A missing path contributes zero rather than raising an error.
  final File doesNotExist=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
  long duDoesNotExist=FileUtil.getDU(doesNotExist);
  assertEquals(0,duDoesNotExist);
  // A plain file reports its own length.
  File notADirectory=new File(partitioned,"part-r-00000");
  long duNotADirectoryActual=FileUtil.getDU(notADirectory);
  long duNotADirectoryExpected=3 + System.getProperty("line.separator").length();
  assertEquals(duNotADirectoryExpected,duNotADirectoryActual);
  try {
    try {
      FileUtil.chmod(notADirectory.getAbsolutePath(),"0000");
    }
    catch ( InterruptedException ie) {
      // Fix: was assertNull(ie), which failed with an uninformative
      // message; fail explicitly and carry the cause instead.
      Assert.fail("chmod was unexpectedly interrupted: " + ie);
    }
    // An unreadable file inside a readable dir still counts toward du.
    assertFalse(FileUtil.canRead(notADirectory));
    final long du3=FileUtil.getDU(partitioned);
    assertEquals(expected,du3);
    try {
      FileUtil.chmod(partitioned.getAbsolutePath(),"0000");
    }
    catch ( InterruptedException ie) {
      Assert.fail("chmod was unexpectedly interrupted: " + ie);
    }
    // An unreadable directory contributes zero.
    assertFalse(FileUtil.canRead(partitioned));
    final long du4=FileUtil.getDU(partitioned);
    assertEquals(0,du4);
  }
  finally {
    // Restore permissions so cleanup can delete the directory.
    FileUtil.chmod(partitioned.getAbsolutePath(),"0777",true);
  }
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises {@code FileUtil.listFiles()} against a directory with files,
 * an empty directory, and a deleted directory (which must raise an
 * IOException rather than return an empty array).
 * @throws IOException on unexpected filesystem errors
 */
@Test(timeout=30000) public void testListFiles() throws IOException {
  setupDirs();
  // The fixture directory holds exactly two files.
  Assert.assertEquals(2,FileUtil.listFiles(partitioned).length);
  final File emptyDir=new File(tmp.getPath(),"test");
  emptyDir.mkdir();
  Assert.assertTrue("Failed to create test dir",emptyDir.exists());
  Assert.assertEquals(0,FileUtil.listFiles(emptyDir).length);
  emptyDir.delete();
  Assert.assertFalse("Failed to delete test dir",emptyDir.exists());
  boolean sawException=false;
  try {
    FileUtil.listFiles(emptyDir);
  }
  catch ( IOException expected) {
    sawException=true;
  }
  Assert.assertTrue("IOException expected on listFiles() for non-existent dir " + emptyDir.toString(),sawException);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * "rm -f" on a glob with no matches must succeed (exit 0) and emit
 * nothing on stderr.
 */
@Test(timeout=30000) public void testRmForceWithNonexistentGlob() throws Exception {
  final FsShell shell=new FsShell();
  shell.setConf(new Configuration());
  // Capture stderr so the "no output" contract can be asserted.
  final ByteArrayOutputStream captured=new ByteArrayOutputStream();
  final PrintStream capturedErr=new PrintStream(captured);
  final PrintStream savedErr=System.err;
  System.setErr(capturedErr);
  try {
    assertEquals(0,shell.run(new String[]{"-rm","-f","nomatch*"}));
    assertTrue(captured.toString().isEmpty());
  }
  finally {
    // Always restore the real stderr, even on assertion failure.
    IOUtils.closeStream(capturedErr);
    System.setErr(savedErr);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * "get" with a source path that does not exist must exit with code 1 and
 * report a proper "No such file or directory" message -- never the
 * literal string "null" -- on the error stream.
 */
@Test(timeout=30000) public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole() throws Exception {
Configuration conf=new Configuration();
FsShell shell=new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream out=new PrintStream(bytes);
final PrintStream oldErr=System.err;
// Note: despite the variable name "out", it is System.err that is
// captured here -- the shell reports errors on stderr.
System.setErr(out);
final String results;
try {
Path tdir=new Path(TEST_ROOT_DIR,"notNullCopy");
// Start from a clean scratch directory.
fileSys.delete(tdir,true);
fileSys.mkdirs(tdir);
String[] args=new String[3];
args[0]="-get";
args[1]=new Path(tdir.toUri().getPath(),"/invalidSrc").toString();
args[2]=new Path(tdir.toUri().getPath(),"/invalidDst").toString();
// Preconditions: neither source nor destination may exist.
assertTrue("file exists",!fileSys.exists(new Path(args[1])));
assertTrue("file exists",!fileSys.exists(new Path(args[2])));
int run=shell.run(args);
results=bytes.toString();
assertEquals("Return code should be 1",1,run);
assertTrue(" Null is coming when source path is invalid. ",!results.contains("get: null"));
assertTrue(" Not displaying the intended message ",results.contains("get: `" + args[1] + "': No such file or directory"));
}
finally {
// Always restore the real stderr, even if an assertion failed.
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * "rm" (without -f) on a glob with no matches must fail with exit 1 and
 * report the missing path on stderr.
 */
@Test(timeout=30000) public void testRmWithNonexistentGlob() throws Exception {
  final FsShell shell=new FsShell();
  shell.setConf(new Configuration());
  // Capture stderr so the error message can be asserted.
  final ByteArrayOutputStream captured=new ByteArrayOutputStream();
  final PrintStream capturedErr=new PrintStream(captured);
  final PrintStream savedErr=System.err;
  System.setErr(capturedErr);
  try {
    assertEquals(1,shell.run(new String[]{"-rm","nomatch*"}));
    assertTrue(captured.toString().contains("rm: `nomatch*': No such file or directory"));
  }
  finally {
    // Always restore the real stderr, even on assertion failure.
    IOUtils.closeStream(capturedErr);
    System.setErr(savedErr);
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test Chmod 1. Create and write file on FS 2. Verify that exit code for
 * chmod on existing file is 0 3. Verify that exit code for chmod on
 * non-existing file is 1 4. Verify that exit code for chmod with glob input
 * on non-existing file is 1 5. Verify that exit code for chmod with glob
 * input on existing file is 0
 * @throws Exception
 */
@Test(timeout=30000) public void testChmod() throws Exception {
Path p1=new Path(TEST_ROOT_DIR,"testChmod/fileExists");
final String f1=p1.toUri().getPath();
final String f2=new Path(TEST_ROOT_DIR,"testChmod/fileDoesNotExist").toUri().getPath();
final String f3=new Path(TEST_ROOT_DIR,"testChmod/nonExistingfiles*").toUri().getPath();
final Path p4=new Path(TEST_ROOT_DIR,"testChmod/file1");
final Path p5=new Path(TEST_ROOT_DIR,"testChmod/file2");
final Path p6=new Path(TEST_ROOT_DIR,"testChmod/file3");
final String f7=new Path(TEST_ROOT_DIR,"testChmod/file*").toUri().getPath();
writeFile(fileSys,p1);
assertTrue(fileSys.exists(p1));
// chmod on an existing file succeeds (exit 0).
String argv[]={"-chmod","777",f1};
assertEquals(0,fsShell.run(argv));
// chmod on a missing file fails (exit 1).
String argv2[]={"-chmod","777",f2};
assertEquals(1,fsShell.run(argv2));
// chmod on a glob with no matches fails (exit 1).
String argv3[]={"-chmod","777",f3};
assertEquals(1,fsShell.run(argv3));
writeFile(fileSys,p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys,p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys,p6);
assertTrue(fileSys.exists(p6));
// chmod on a glob matching several files succeeds (exit 0).
String argv4[]={"-chmod","777",f7};
assertEquals(0,fsShell.run(argv4));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that an interrupted command aborts the FsShell run with exit
 * code 130; InterruptCommand.processed tracks how many paths the custom
 * "-testInterrupt" command handled before the interrupt took effect.
 * NOTE(review): 130 looks like the 128+SIGINT shell convention -- confirm
 * against FsShell's exit-code handling.
 */
@Test(timeout=30000) public void testInterrupt() throws Exception {
MyFsShell shell=new MyFsShell();
shell.setConf(new Configuration());
final Path d=new Path(TEST_ROOT_DIR,"testInterrupt");
final Path f1=new Path(d,"f1");
final Path f2=new Path(d,"f2");
assertTrue(fileSys.mkdirs(d));
writeFile(fileSys,f1);
assertTrue(fileSys.isFile(f1));
writeFile(fileSys,f2);
assertTrue(fileSys.isFile(f2));
// Two explicit file arguments: exactly one is processed before the
// interrupt aborts the run.
int exitCode=shell.run(new String[]{"-testInterrupt",f1.toString(),f2.toString()});
assertEquals(1,InterruptCommand.processed);
assertEquals(130,exitCode);
// Directory argument: two entries are processed before the abort.
exitCode=shell.run(new String[]{"-testInterrupt",d.toString()});
assertEquals(2,InterruptCommand.processed);
assertEquals(130,exitCode);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test createHardLinkMult(), again, this time with the "too long list"
 * case where the total size of the command line arguments exceed the
 * allowed maximum. In this case, the list should be automatically
 * broken up into chunks, each chunk no larger than the max allowed.
 * We use an extended version of the method call, specifying the
 * size limit explicitly, to simulate the "too long" list with a
 * relatively short list.
 */
@Test public void testCreateHardLinkMultOversizeAndEmpty() throws IOException {
// Rename the three source files to equal-length names so each one
// contributes the same, predictable amount to the argument length.
String name1="x11111111";
String name2="x22222222";
String name3="x33333333";
File x1_long=new File(src,name1);
File x2_long=new File(src,name2);
File x3_long=new File(src,name3);
x1.renameTo(x1_long);
x2.renameTo(x2_long);
x3.renameTo(x3_long);
assertTrue(x1_long.exists());
assertTrue(x2_long.exists());
assertTrue(x3_long.exists());
assertFalse(x1.exists());
assertFalse(x2.exists());
assertFalse(x3.exists());
int callCount;
String[] emptyList={};
String[] fileNames=src.list();
// Fixed per-invocation overhead, measured with an empty file list.
int overhead=getLinkMultArgLength(src,emptyList,tgt_mult);
// Allow room for 2.5 names per command: the 3 files must be split
// into 2 chunked invocations.
int maxLength=overhead + (int)(2.5 * (float)(1 + name1.length()));
callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength);
assertEquals(2,callCount);
String[] tgt_multNames=tgt_mult.list();
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
assertArrayEquals(fileNames,tgt_multNames);
// Reset the target directory for the degenerate-limit scenario.
FileUtil.fullyDelete(tgt_mult);
assertFalse(tgt_mult.exists());
tgt_mult.mkdirs();
assertTrue(tgt_mult.exists() && tgt_mult.list().length == 0);
// Allow room for only half a name: one invocation per file (3 calls).
maxLength=overhead + (int)(0.5 * (float)(1 + name1.length()));
callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength);
assertEquals(3,callCount);
tgt_multNames=tgt_mult.list();
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
assertArrayEquals(fileNames,tgt_multNames);
}
Class: org.apache.hadoop.fs.TestListFiles
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test when input path is a file: listFiles on a plain file must yield
 * exactly that file, both recursively and non-recursively.
 */
@Test public void testFile() throws IOException {
fs.mkdirs(TEST_DIR);
writeFile(fs,FILE1,FILE_LEN);
// Recursive listing of a plain file yields exactly one entry.
RemoteIterator itor=fs.listFiles(FILE1,true);
LocatedFileStatus stat=itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN,stat.getLen());
assertEquals(fs.makeQualified(FILE1),stat.getPath());
assertEquals(1,stat.getBlockLocations().length);
// Non-recursive listing must behave identically for a file input.
itor=fs.listFiles(FILE1,false);
stat=itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN,stat.getLen());
assertEquals(fs.makeQualified(FILE1),stat.getPath());
assertEquals(1,stat.getBlockLocations().length);
fs.delete(FILE1,true);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Test that {@link LocalDirAllocator#getAllLocalPathsToRead(String,Configuration)}
 * returns correct filenames and "file" schema.
 * @throws IOException
 */
@Test(timeout=30000) public void testGetAllLocalPathsToRead() throws IOException {
  assumeTrue(!isWindows);
  String dir0=buildBufferDir(ROOT,0);
  String dir1=buildBufferDir(ROOT,1);
  try {
    conf.set(CONTEXT,dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir0)));
    assertTrue(localFs.mkdirs(new Path(dir1)));
    // One copy of FILENAME in each configured buffer dir.
    localFs.create(new Path(dir0 + Path.SEPARATOR + FILENAME));
    localFs.create(new Path(dir1 + Path.SEPARATOR + FILENAME));
    final Iterable pathIterable=dirAllocator.getAllLocalPathsToRead(FILENAME,conf);
    int count=0;
    for ( final Path p : pathIterable) {
      count++;
      assertEquals(FILENAME,p.getName());
      assertEquals("file",p.getFileSystem(conf).getUri().getScheme());
    }
    assertEquals(2,count);
    // The iterable appears to be single-use: after full consumption a
    // fresh iterator must already be exhausted.
    try {
      Path p=pathIterable.iterator().next();
      // Fix: was assertFalse(msg,true) -- an always-failing assertion in
      // a confusing form; fail explicitly instead.
      throw new AssertionError("NoSuchElementException must be thrown, but returned [" + p + "] instead.");
    }
    catch ( NoSuchElementException nsee) {
    }
    // The returned iterator must be read-only.
    final Iterable pathIterable2=dirAllocator.getAllLocalPathsToRead(FILENAME,conf);
    final Iterator it=pathIterable2.iterator();
    try {
      it.remove();
      // Fix: was assertFalse(true); fail explicitly with a message.
      throw new AssertionError("remove() should have thrown UnsupportedOperationException");
    }
    catch ( UnsupportedOperationException uoe) {
    }
  }
  finally {
    Shell.execCommand(new String[]{"chmod","u+w",BUFFER_DIR_ROOT});
    rmBufferDirs();
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierAssumptionSetterHybridVerifier
/**
 * Test no side effect files are left over. After creating a temp
 * file, remove both the temp file and its parent. Verify that
 * no files or directories are left over as can happen when File objects
 * are mistakenly created from fully qualified path strings.
 * @throws IOException
 */
@Test(timeout=30000) public void testNoSideEffects() throws IOException {
assumeTrue(!isWindows);
String dir=buildBufferDir(ROOT,0);
try {
conf.set(CONTEXT,dir);
// -1 size: presumably means "unknown size" -- confirm against the
// LocalDirAllocator.createTmpFileForWrite contract.
File result=dirAllocator.createTmpFileForWrite(FILENAME,-1,conf);
// Both the temp file and its parent directory must be removable,
// leaving nothing behind under the buffer dir.
assertTrue(result.delete());
assertTrue(result.getParentFile().delete());
assertFalse(new File(dir).exists());
}
finally {
// Restore write permission so cleanup can remove the buffer root.
Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * getLocalPathForWrite without the buffer-dir property configured must
 * surface a descriptive IOException -- never a NullPointerException.
 */
@Test(timeout=30000) public void testShouldNotthrowNPE() throws Exception {
  final Configuration emptyConf=new Configuration();
  try {
    dirAllocator.getLocalPathForWrite("/test",emptyConf);
    fail("Exception not thrown when " + CONTEXT + " is not set");
  }
  catch ( NullPointerException e) {
    // An NPE means the missing configuration was not handled gracefully.
    fail("Lack of configuration should not have thrown an NPE.");
  }
  catch ( IOException e) {
    assertEquals(CONTEXT + " not configured",e.getMessage());
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * The second dir exists & is RW
 * getLocalPathForWrite with checkAccess set to false should create a parent
 * directory. With checkAccess true, the directory should not be created.
 * @throws Exception
 */
@Test(timeout=30000) public void testLocalPathForWriteDirCreation() throws IOException {
  String dir0=buildBufferDir(ROOT,0);
  String dir1=buildBufferDir(ROOT,1);
  try {
    conf.set(CONTEXT,dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    // The default overload creates the parent directory.
    Path p1=dirAllocator.getLocalPathForWrite("p1/x",SMALL_FILE_SIZE,conf);
    assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
    // checkAccess=false must NOT create the parent; stat-ing it must fail.
    Path p2=dirAllocator.getLocalPathForWrite("p2/x",SMALL_FILE_SIZE,conf,false);
    try {
      localFs.getFileStatus(p2.getParent());
      // Fix: the original fell through silently when no exception was
      // thrown, letting an erroneously created directory go undetected.
      throw new AssertionError("Expected FileNotFoundException for " + p2.getParent());
    }
    catch ( Exception e) {
      // Fix: JUnit's assertEquals takes (expected, actual).
      assertEquals(FileNotFoundException.class,e.getClass());
    }
  }
  finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
Class: org.apache.hadoop.fs.TestLocalFileSystem
InternalCallVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * listStatus must cope with a filename containing ':' -- skipped on
 * Windows, where ':' is not a legal filename character.
 */
@Test(timeout=1000) public void testListStatusWithColons() throws IOException {
assumeTrue(!Shell.WINDOWS);
File colonFile=new File(TEST_ROOT_DIR,"foo:bar");
colonFile.mkdirs();
FileStatus[] stats=fileSys.listStatus(new Path(TEST_ROOT_DIR));
assertEquals("Unexpected number of stats",1,stats.length);
// The colon must survive the Path/URI round trip unmangled.
assertEquals("Bad path from stat",colonFile.getAbsolutePath(),stats[0].getPath().toUri().getPath());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test opening and reading from an InputStream through a hdfs:// URL.
 *
 * First generate a file with some content through the FileSystem API, then
 * try to open and read the file through the URL stream API.
 * @throws IOException
 */
@Test public void testDfsUrls() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
// NOTE(review): URL.setURLStreamHandlerFactory may be invoked at most
// once per JVM, so this test cannot coexist in-process with any other
// code that installs a factory.
FsUrlStreamHandlerFactory factory=new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
java.net.URL.setURLStreamHandlerFactory(factory);
Path filePath=new Path("/thefile");
try {
// Write 1024 bytes (0..255 repeating) via the FileSystem API.
byte[] fileContent=new byte[1024];
for (int i=0; i < fileContent.length; ++i) fileContent[i]=(byte)i;
OutputStream os=fs.create(filePath);
os.write(fileContent);
os.close();
// Re-open the same file through an hdfs:// URL stream.
URI uri=fs.getUri();
URL fileURL=new URL(uri.getScheme(),uri.getHost(),uri.getPort(),filePath.toString());
InputStream is=fileURL.openStream();
assertNotNull(is);
byte[] bytes=new byte[4096];
assertEquals(1024,is.read(bytes));
is.close();
// The contents must round-trip unchanged.
for (int i=0; i < fileContent.length; ++i) assertEquals(fileContent[i],bytes[i]);
fs.delete(filePath,false);
}
finally {
fs.close();
cluster.shutdown();
}
}
APIUtilityVerifierIterativeVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test opening and reading from an InputStream through a file:// URL.
 * @throws IOException
 * @throws URISyntaxException
 */
@Test public void testFileUrls() throws IOException, URISyntaxException {
Configuration conf=new HdfsConfiguration();
if (!TEST_ROOT_DIR.exists()) {
if (!TEST_ROOT_DIR.mkdirs()) throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
}
File tmpFile=new File(TEST_ROOT_DIR,"thefile");
URI uri=tmpFile.toURI();
FileSystem fs=FileSystem.get(uri,conf);
try {
// Write 1024 bytes (0..255 repeating) through the FileSystem API.
byte[] fileContent=new byte[1024];
for (int i=0; i < fileContent.length; ++i) fileContent[i]=(byte)i;
OutputStream os=fs.create(new Path(uri.getPath()));
os.write(fileContent);
os.close();
// Read the same file back through a file:// URL stream.
URL fileURL=uri.toURL();
InputStream is=fileURL.openStream();
assertNotNull(is);
byte[] bytes=new byte[4096];
assertEquals(1024,is.read(bytes));
is.close();
// The contents must round-trip unchanged.
for (int i=0; i < fileContent.length; ++i) assertEquals(fileContent[i],bytes[i]);
fs.delete(new Path(uri.getPath()),false);
}
finally {
fs.close();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Tests that WASB works well with an older version container with ASV-era
 * version and metadata.
 */
@Test public void testFirstContainerVersionMetadata() throws Exception {
// Simulate a container stamped with the legacy (ASV-era) version key.
HashMap containerMetadata=new HashMap();
containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY,AzureNativeFileSystemStore.FIRST_WASB_VERSION);
FsWithPreExistingContainer fsWithContainer=FsWithPreExistingContainer.create(containerMetadata);
// Read-only operations must leave the legacy metadata untouched.
assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
assertEquals(0,fsWithContainer.getFs().listStatus(new Path("/")).length);
assertEquals(AzureNativeFileSystemStore.FIRST_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
// The first write operation migrates the stamp to the current version
// key and clears the legacy one.
fsWithContainer.getFs().mkdirs(new Path("/dir"));
assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
fsWithContainer.close();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Tests that WASB stamped the version in the container metadata if it does a
 * write operation to a pre-existing container.
 */
@Test public void testPreExistingContainerVersionMetadata() throws Exception {
// Pre-existing container with no metadata at all.
FsWithPreExistingContainer fsWithContainer=FsWithPreExistingContainer.create();
// Read-only operations must not stamp any metadata.
assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
assertEquals(0,fsWithContainer.getFs().listStatus(new Path("/")).length);
assertNull(fsWithContainer.getContainerMetadata());
// The first write stamps the current WASB version.
fsWithContainer.getFs().mkdirs(new Path("/dir"));
assertNotNull(fsWithContainer.getContainerMetadata());
assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
fsWithContainer.close();
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Tests that WASB stamped the version in the container metadata.
 */
@Test public void testContainerVersionMetadata() throws Exception {
// Any write (here: creating an empty file) should record the current
// WASB version in the backing container's metadata.
fs.createNewFile(new Path("/foo"));
HashMap containerMetadata=backingStore.getContainerMetadata();
assertNotNull(containerMetadata);
assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,containerMetadata.get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that creating then renaming a file emits a bounded number of
 * web requests and bumps the files-created counter exactly once.
 */
@Test public void testMetricsOnFileRename() throws Exception {
// Baseline of web responses before this test's operations.
long base=getBaseWebResponses();
Path originalPath=new Path("/metricsTest_RenameStart");
Path destinationPath=new Path("/metricsTest_RenameFinal");
assertEquals(0,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_CREATED));
assertTrue(fs.createNewFile(originalPath));
logOpResponseCount("Creating an empty file",base);
// Creating a file is expected to cost 2-20 requests.
base=assertWebResponsesInRange(base,2,20);
assertEquals(1,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_CREATED));
assertTrue(fs.rename(originalPath,destinationPath));
logOpResponseCount("Renaming a file",base);
// Renaming is expected to cost 2-15 requests.
base=assertWebResponsesInRange(base,2,15);
assertNoErrors();
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * End-to-end metrics check for creating then reading a 1KB file: request
 * counts, bytes written/read, and the upload/download rate and latency
 * gauges must all fall within bounds derived from wall-clock timings
 * taken around the operations.
 */
@Test public void testMetricsOnFileCreateRead() throws Exception {
long base=getBaseWebResponses();
assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath=new Path("/metricsTest_webResponses");
final int FILE_SIZE=1000;
// Gauges only change on the explicit triggerUpdate() calls below.
getBandwidthGaugeUpdater().suppressAutoUpdate();
Date start=new Date();
OutputStream outputStream=fs.create(filePath);
outputStream.write(nonZeroByteArray(FILE_SIZE));
outputStream.close();
long uploadDurationMs=new Date().getTime() - start.getTime();
logOpResponseCount("Creating a 1K file",base);
base=assertWebResponsesInRange(base,2,15);
getBandwidthGaugeUpdater().triggerUpdate(true);
long bytesWritten=AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
// Per-second and total byte counts may carry protocol overhead, hence
// the loose FILE_SIZE/2 .. FILE_SIZE*2 windows.
assertTrue("The bytes written in the last second " + bytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
// The end-to-end timing underestimates the block-level rate, so the
// measured gauge must be at least this expected rate.
long expectedRate=(FILE_SIZE * 1000L) / uploadDurationMs;
assertTrue("The upload rate " + uploadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block upload time.",uploadRate >= expectedRate);
long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
// Conversely, end-to-end time overestimates block latency, so the
// gauge must be positive but no larger than the measured duration.
long expectedLatency=uploadDurationMs;
assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0);
assertTrue("The upload latency " + uploadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block upload time.",uploadLatency <= expectedLatency);
start=new Date();
InputStream inputStream=fs.open(filePath);
// Byte-at-a-time read doubles as a content-length check.
int count=0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
long downloadDurationMs=new Date().getTime() - start.getTime();
assertEquals(FILE_SIZE,count);
logOpResponseCount("Reading a 1K file",base);
base=assertWebResponsesInRange(base,1,10);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE,totalBytesRead);
long bytesRead=AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
assertTrue("The bytes read in the last second " + bytesRead + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
expectedRate=(FILE_SIZE * 1000L) / downloadDurationMs;
assertTrue("The download rate " + downloadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block download time.",downloadRate >= expectedRate);
long downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
expectedLatency=downloadDurationMs;
assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0);
assertTrue("The download latency " + downloadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block download time.",downloadLatency <= expectedLatency);
assertNoErrors();
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Metrics check for a 100 MB file: request counts must reflect chunked
 * transfer, byte totals must match the file size, and the latency gauges
 * must be positive. Rates are logged but (unlike the 1K variant) not
 * bounded, since timing a large transfer is too variable.
 */
@Test public void testMetricsOnBigFileCreateRead() throws Exception {
long base=getBaseWebResponses();
assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath=new Path("/metricsTest_webResponses");
final int FILE_SIZE=100 * 1024 * 1024;
// Gauges only change on the explicit triggerUpdate() calls below.
getBandwidthGaugeUpdater().suppressAutoUpdate();
OutputStream outputStream=fs.create(filePath);
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
logOpResponseCount("Creating a 100 MB file",base);
// A 100 MB upload is expected to be split across 20-50 requests.
base=assertWebResponsesInRange(base,20,50);
getBandwidthGaugeUpdater().triggerUpdate(true);
long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0);
// Byte-at-a-time read doubles as a content-length check.
InputStream inputStream=fs.open(filePath);
int count=0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
assertEquals(FILE_SIZE,count);
logOpResponseCount("Reading a 100 MB file",base);
base=assertWebResponsesInRange(base,20,40);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE,totalBytesRead);
long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
long downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies request counts for exists()/delete() and that the
 * files-deleted counter is bumped exactly once.
 */
@Test public void testMetricsOnFileExistsDelete() throws Exception {
long base=getBaseWebResponses();
Path filePath=new Path("/metricsTest_delete");
assertFalse(fs.exists(filePath));
logOpResponseCount("Checking file existence for non-existent file",base);
base=assertWebResponsesInRange(base,1,3);
assertTrue(fs.createNewFile(filePath));
// Reset the baseline so the create's own requests are not counted.
base=getCurrentWebResponses();
assertTrue(fs.exists(filePath));
logOpResponseCount("Checking file existence for existent file",base);
base=assertWebResponsesInRange(base,1,2);
assertEquals(0,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_DELETED));
assertTrue(fs.delete(filePath,false));
logOpResponseCount("Deleting a file",base);
base=assertWebResponsesInRange(base,1,4);
assertEquals(1,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_DELETED));
assertNoErrors();
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Drives BandwidthGaugeUpdater manually (single-threaded) and checks the
 * bytes-written gauge after each explicit triggerUpdate call.
 */
@Test public void testSingleThreaded() throws Exception {
AzureFileSystemInstrumentation instrumentation=new AzureFileSystemInstrumentation(new Configuration());
BandwidthGaugeUpdater updater=new BandwidthGaugeUpdater(instrumentation,1000,true);
// No uploads recorded yet: gauge must read zero.
updater.triggerUpdate(true);
assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));
// A 150-byte upload within the current instant is reported in full.
updater.blockUploaded(new Date(),new Date(),150);
updater.triggerUpdate(true);
assertEquals(150,AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation));
// An upload spread over 10 seconds is averaged: ~200/10 = ~20 bytes/s.
updater.blockUploaded(new Date(new Date().getTime() - 10000),new Date(),200);
updater.triggerUpdate(true);
long currentBytes=AzureMetricsTestUtil.getCurrentBytesWritten(instrumentation);
assertTrue("We expect around (200/10 = 20) bytes written as the gauge value." + "Got " + currentBytes,currentBytes > 18 && currentBytes < 22);
updater.close();
}
BooleanVerifierAssumptionSetterHybridVerifier
/**
 * Verifies WASB updater threads are reclaimed by finalization once the
 * file systems that spawned them become unreachable.
 * NOTE(review): relies on System.gc()/runFinalization() actually running
 * finalizers, which the JVM does not guarantee -- this test is
 * inherently best-effort and may be flaky on some JVMs.
 */
@Test public void testFinalizerThreadShutdown() throws Exception {
// Clear out any WASB threads left over from earlier tests.
System.gc();
System.runFinalization();
int nUpdaterThreadsStart=getWasbThreadCount();
assertTrue("Existing WASB threads have not been cleared",nUpdaterThreadsStart == 0);
final int nFilesystemsToSpawn=10;
AzureBlobStorageTestAccount testAccount=null;
// Spawn 10 mock file systems; each is expected to start one thread.
for (int i=0; i < nFilesystemsToSpawn; i++) {
testAccount=AzureBlobStorageTestAccount.createMock();
testAccount.getFileSystem();
}
int nUpdaterThreadsAfterSpawn=getWasbThreadCount();
Assume.assumeTrue("Background threads should have spawned.",nUpdaterThreadsAfterSpawn == 10);
// Drop the last strong reference and coax finalization to run.
testAccount=null;
System.gc();
System.runFinalization();
int nUpdaterThreadsAfterCleanup=getWasbThreadCount();
assertTrue("Finalizers should have reduced the thread count. ",nUpdaterThreadsAfterCleanup == 0);
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
 * Rename test -handles filesystems that will overwrite the destination
 * as well as those that do not (i.e. HDFS).
 * @throws Throwable
 */
@Test public void testRenameFileOverExistingFile() throws Throwable {
describe("Verify renaming a file onto an existing file matches expectations");
Path srcFile=path("source-256.txt");
byte[] srcData=dataset(256,'a','z');
writeDataset(getFileSystem(),srcFile,srcData,srcData.length,1024,false);
Path destFile=path("dest-512.txt");
byte[] destData=dataset(512,'A','Z');
writeDataset(getFileSystem(),destFile,destData,destData.length,1024,false);
assertIsFile(destFile);
// Contract options declared by the filesystem under test decide which
// of the acceptable outcomes applies.
boolean renameOverwritesDest=isSupported(RENAME_OVERWRITES_DEST);
boolean renameReturnsFalseOnRenameDestExists=!isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS);
boolean destUnchanged=true;
try {
boolean renamed=rename(srcFile,destFile);
if (renameOverwritesDest) {
// Overwriting filesystems: rename succeeds, dest holds srcData.
assertTrue("Rename returned false",renamed);
destUnchanged=false;
}
else {
// Non-overwriting filesystems: a successful rename is only legal
// when the contract says failure is signalled by returning false.
if (renamed && !renameReturnsFalseOnRenameDestExists) {
String destDirLS=generateAndLogErrorListing(srcFile,destFile);
getLog().error("dest dir {}",destDirLS);
fail("expected rename(" + srcFile + ", "+ destFile+ " ) to fail,"+ " but got success and destination of "+ destDirLS);
}
}
}
catch ( FileAlreadyExistsException e) {
// Also acceptable: the filesystem rejects the rename outright.
handleExpectedException(e);
}
// Destination content must match whichever outcome occurred.
ContractTestUtils.verifyFileContents(getFileSystem(),destFile,destUnchanged ? destData : srcData);
}
APIUtilityVerifierBranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Rename a path that does not exist. Depending on the contract option
 * RENAME_RETURNS_FALSE_IF_SOURCE_MISSING the filesystem either returns
 * false or raises FileNotFoundException; either way no destination
 * file may be created.
 */
@Test public void testRenameNonexistentFile() throws Throwable {
describe("rename a file into a new file in the same directory");
// Source path that deliberately does not exist.
Path missingSource=path("testRenameNonexistentFileSrc");
Path renameTarget=path("testRenameNonexistentFileDest");
// Does this FS signal a missing source via a false return value?
boolean failureSignalledByFalse=isSupported(ContractOptions.RENAME_RETURNS_FALSE_IF_SOURCE_MISSING);
mkdirs(missingSource.getParent());
try {
boolean outcome=rename(missingSource,renameTarget);
if (failureSignalledByFalse) {
getLog().warn("Rename returned {} renaming a nonexistent file",outcome);
assertFalse("Renaming a missing file returned true",outcome);
} else {
// An exception was expected; a plain return is a failure.
String destDirLS=generateAndLogErrorListing(missingSource,renameTarget);
fail("expected rename(" + missingSource + ", "+ renameTarget+ " ) to fail,"+ " got a result of "+ outcome+ " and a destination directory of "+ destDirLS);
}
}
catch ( FileNotFoundException e) {
if (failureSignalledByFalse) {
// The FS declared it returns false, yet it threw.
ContractTestUtils.fail("Renaming a missing file unexpectedly threw an exception",e);
}
handleExpectedException(e);
}
catch ( IOException e) {
handleRelaxedException("rename nonexistent file","FileNotFoundException",e);
}
// Whatever the failure mode, nothing may appear at the destination.
assertPathDoesNotExist("rename nonexistent file created a destination file",renameTarget);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Seek to two bytes before the end of the file, read the two remaining
 * bytes, then verify the next read reports end-of-file.
 */
@Test public void testSeekAndReadPastEndOfFile() throws Throwable {
describe("verify that reading past the last bytes in the file returns -1");
instream=getFileSystem().open(smallSeekFile);
assertEquals(0,instream.getPos());
// Position two bytes before the end.
instream.seek(TEST_FILE_LEN - 2);
// Both remaining bytes must be readable...
for (int remaining=0; remaining < 2; remaining++) {
assertTrue("Premature EOF",instream.read() != -1);
}
// ...and one more read is past the end.
assertMinusOne("read past end of file",instream.read());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that a positioned (pread-style) bulk read does not move the
 * stream's sequential read position, and that both the positioned read
 * and the subsequent sequential read return the correct bytes.
 */
@Test public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
describe("verify that a positioned read does not change the getPos() value");
Path testSeekFile=path("bigseekfile.txt");
byte[] block=dataset(65536,0,255);
createFile(getFileSystem(),testSeekFile,false,block);
instream=getFileSystem().open(testSeekFile);
instream.seek(39999);
assertTrue(-1 != instream.read());
// The sequential read above advanced the position to 40000.
assertEquals(40000,instream.getPos());
byte[] readBuffer=new byte[256];
// Positioned read at offset 128 must leave getPos() untouched.
// NOTE(review): the return value (bytes actually read) is ignored, so
// a short read would go undetected until the content loop below.
instream.read(128,readBuffer,0,readBuffer.length);
assertEquals(40000,instream.getPos());
// The next sequential read continues from position 40000.
assertEquals("@40000",block[40000],(byte)instream.read());
// The positioned read must have returned the bytes at offsets 128..383.
for (int i=0; i < 256; i++) {
assertEquals("@" + i,block[i + 128],readBuffer[i]);
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Seeking to a negative offset must raise an EOFException (or, under
 * the relaxed contract, another IOException) and must leave the
 * stream position unchanged at 0.
 */
@Test public void testNegativeSeek() throws Throwable {
instream=getFileSystem().open(smallSeekFile);
assertEquals(0,instream.getPos());
try {
instream.seek(-1);
// Some filesystems only fail once data is actually read.
long pos=instream.getPos();
LOG.warn("Seek to -1 returned a position of " + pos);
int data=instream.read();
fail("expected an exception, got data " + data + " at a position of "+ pos);
}
catch ( EOFException e) {
handleExpectedException(e);
}
catch ( IOException e) {
handleRelaxedException("a negative seek","EOFException",e);
}
// The failed seek must not have moved the position.
assertEquals(0,instream.getPos());
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test ACL operations on a directory, including default ACLs.
 * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify
 * each step. (Javadoc truncated during extraction; closed here so the
 * fragment remains well-formed.)
 */
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Validate the various ACL set/modify/remove calls. General strategy is
 * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
 * and GETACLSTATUS. (Javadoc truncated during extraction; closed here
 * so the fragment remains well-formed.)
 */
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Read the final two bytes of the small seek file, then check that one
 * further read reports end-of-file.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
instream=fs.open(smallSeekFile);
assertEquals(0,instream.getPos());
// Jump to two bytes before the end of the file.
instream.seek(SMALL_SEEK_FILE_LEN - 2);
// Two reads succeed...
for (int attempt=0; attempt < 2; attempt++) {
assertTrue("Premature EOF",instream.read() != -1);
}
// ...the third is past the end.
assertMinusOne("read past end of file",instream.read());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Seeking to -1 must fail; here any IOException is accepted. The
 * stream position must remain at 0 afterwards.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testNegativeSeek() throws Throwable {
instream=fs.open(smallSeekFile);
assertEquals(0,instream.getPos());
try {
instream.seek(-1);
// Some stores only fail on the subsequent read.
long pos=instream.getPos();
LOG.warn("Seek to -1 returned a position of " + pos);
int result=instream.read();
fail("expected an exception, got data " + result + " at a position of "+ pos);
}
catch ( IOException e) {
// expected: a negative seek is rejected
}
assertEquals(0,instream.getPos());
}
BooleanVerifierNullVerifierHybridVerifier
/**
 * Asking for more bytes than the file holds is legal as long as the
 * origin offset is valid; at least one block location must come back.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testLocateOutOfRangeLen() throws Throwable {
describe("overshooting the length is legal, as long as the" + " origin location is valid");
// Request 100 bytes beyond the end of the file.
BlockLocation[] blockLocations=getFs().getFileBlockLocations(createFileAndGetStatus(),0,data.length + 100);
assertNotNull(blockLocations);
assertTrue(blockLocations.length > 0);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that a dir off root has a listStatus() call that works as
 * expected, and that the listing changes once a child is added.
 * @throws Exception on failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception {
Path testDir=path("/test");
// Start from a clean, empty directory.
fs.delete(testDir,true);
mkdirs(testDir);
assertExists("created test directory",testDir);
FileStatus[] listing=fs.listStatus(testDir);
String listingText=statusToString(testDir.toString(),listing);
assertEquals("Wrong number of elements in file status " + listingText,0,listing.length);
// Add a single (empty) file, then re-list.
Path child=path("/test/file");
SwiftTestUtils.touch(fs,child);
listing=fs.listStatus(testDir);
listingText=statusToString(testDir.toString(),listing);
assertEquals("Wrong number of elements in file status " + listingText,1,listing.length);
// NOTE(review): the zero-byte entry is asserted to be a directory via
// SwiftFileStatus.isDir() — confirm this is Swift's intended
// semantics for empty objects.
SwiftFileStatus childStat=(SwiftFileStatus)listing[0];
assertTrue("isDir(): Not a directory: " + childStat,childStat.isDir());
extraStatusAssertions(childStat);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Assert that the filesystem is case sensitive. Create a mixed-case
 * file and check its lower-case variant is absent; then create the
 * lower-case file and confirm both coexist, with the original file
 * unchanged.
 * @throws Exception failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testFilesystemIsCaseSensitive() throws Exception {
String mixedCaseFilename="/test/UPPER.TXT";
Path upper=path(mixedCaseFilename);
Path lower=path(mixedCaseFilename.toLowerCase(Locale.ENGLISH));
// Neither variant exists yet.
assertFalse("File exists" + upper,fs.exists(upper));
assertFalse("File exists" + lower,fs.exists(lower));
// Create only the upper-case file.
FSDataOutputStream stream=fs.create(upper);
stream.writeUTF("UPPER");
stream.close();
FileStatus upperStatus=fs.getFileStatus(upper);
assertExists("Original upper case file" + upper,upper);
// Case sensitivity: the lower-case twin must not have appeared.
assertPathDoesNotExist("lower case file",lower);
// Now create the lower-case sibling.
stream=fs.create(lower);
stream.writeUTF("l");
stream.close();
assertExists("lower case file",lower);
assertExists("Original upper case file " + upper,upper);
// The lower-case create must not have touched the upper-case file.
FileStatus newStatus=fs.getFileStatus(upper);
assertEquals("Expected status:" + upperStatus + " actual status "+ newStatus,upperStatus.getLen(),newStatus.getLen());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Rename a single file into an existing parent directory and verify
 * the payload survives the move intact.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFile() throws Exception {
assumeRenameSupported();
final Path source=new Path("/test/alice/file");
final Path destination=new Path("/test/bob/file");
fs.mkdirs(destination.getParent());
// Write a small payload to the source file.
final byte[] message="Some data".getBytes();
final FSDataOutputStream out=fs.create(source);
out.write(message);
out.close();
assertTrue(fs.exists(source));
rename(source,destination,true,false,true);
// Read the destination back and compare its contents byte-for-byte.
final FSDataInputStream in=fs.open(destination);
final byte[] readBytes=new byte[512];
final int bytesRead=in.read(readBytes);
in.close();
final byte[] actual=new byte[bytesRead];
System.arraycopy(readBytes,0,actual,0,bytesRead);
assertEquals(new String(message),new String(actual));
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
 * Rename a file into an existing directory: the file should end up
 * under the destination directory with its original basename.
 * @throws Exception
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFileIntoExistingDirectory() throws Exception {
assumeRenameSupported();
Path source=path("/test/olddir/file");
createFile(source);
Path destDir=path("/test/new/newdir");
fs.mkdirs(destDir);
rename(source,destDir,true,false,true);
// The renamed file keeps its basename under the target directory.
Path movedFile=path("/test/new/newdir/file");
if (!fs.exists(movedFile)) {
// Log listings to help diagnose where the file actually went.
String ls=ls(destDir);
LOG.info(ls(path("/test/new")));
LOG.info(ls(path("/test/hadoop")));
fail("did not find " + movedFile + " - directory: "+ ls);
}
assertTrue("Destination changed",fs.exists(path("/test/new/newdir/file")));
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that default blocksize values can be retrieved on the client side.
 * Without a path argument viewfs cannot resolve a mount point, so
 * {@code getDefaultBlockSize()} must throw
 * {@link NotInMountpointException}; the path-qualified overload
 * resolves through the mount and returns the HDFS default.
 */
@Test public void testGetDefaultBlockSize() throws IOException, URISyntaxException {
try {
vfs.getDefaultBlockSize();
// Fixed copy/paste error: the message previously named
// "getServerDefaults" and misspelled "exception".
fail("getDefaultBlockSize on viewFs did not throw exception!");
}
catch ( NotInMountpointException e) {
// JUnit convention: expected value first, actual second.
assertEquals(DFS_BLOCK_SIZE_DEFAULT,vfs.getDefaultBlockSize(testFilePath));
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that server default values can be retrieved on the client side.
 * The path-less call must throw {@link NotInMountpointException}; the
 * path-qualified call returns the defaults of the underlying HDFS.
 * NOTE(review): the expected replication is DFS_REPLICATION_DEFAULT+1,
 * presumably matching this suite's cluster configuration — confirm.
 */
@Test public void testServerDefaults() throws IOException {
try {
// The return value is irrelevant here: the call must throw
// (the old code assigned it to an unused local).
vfs.getServerDefaults();
// Fixed typo: "excetion" -> "exception".
fail("getServerDefaults on viewFs did not throw exception!");
}
catch ( NotInMountpointException e) {
FsServerDefaults serverDefaults=vfs.getServerDefaults(testFilePath);
assertEquals(DFS_BLOCK_SIZE_DEFAULT,serverDefaults.getBlockSize());
assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT,serverDefaults.getBytesPerChecksum());
assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT,serverDefaults.getWritePacketSize());
assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT,serverDefaults.getFileBufferSize());
assertEquals(DFS_REPLICATION_DEFAULT + 1,serverDefaults.getReplication());
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that default replication values can be retrieved on the client
 * side. The path-less call must throw
 * {@link NotInMountpointException}; the path-qualified call returns
 * the expected replication. NOTE(review): expected value is
 * DFS_REPLICATION_DEFAULT+1, presumably matching this suite's cluster
 * configuration — confirm.
 */
@Test public void testGetDefaultReplication() throws IOException, URISyntaxException {
try {
vfs.getDefaultReplication();
// Fixed typo: "excetion" -> "exception".
fail("getDefaultReplication on viewFs did not throw exception!");
}
catch ( NotInMountpointException e) {
// JUnit convention: expected value first, actual second.
assertEquals(DFS_REPLICATION_DEFAULT + 1,vfs.getDefaultReplication(testFilePath));
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * A checksum fetched through the viewfs mount must match the checksum
 * of the underlying HDFS file, and must differ from the checksum of an
 * unrelated file.
 */
@Test public void testGetFileChecksum() throws IOException, URISyntaxException {
// Create the target file plus an unrelated sibling.
fileSystemTestHelper.createFile(fHdfs,someFile);
fileSystemTestHelper.createFile(fHdfs,fileSystemTestHelper.getTestRootPath(fHdfs,someFile + "other"),1,512);
FileChecksum viewChecksum=vfs.getFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
FileChecksum hdfsChecksum=fHdfs.getFileChecksum(new Path(someFile));
assertEquals("HDFS and ViewFS checksums were not the same",viewChecksum,hdfsChecksum);
// The sibling file must produce a different checksum.
FileChecksum otherChecksum=fHdfs.getFileChecksum(new Path(someFile + "other"));
assertFalse("Some other HDFS file which should not have had the same " + "checksum as viewFS did!",viewChecksum.equals(otherChecksum));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * getAclStatus on an internal dir must report the current user and
 * group, the minimal 555 ACL, and no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation user=UserGroupInformation.getCurrentUser();
AclStatus status=fsView.getAclStatus(new Path("/internalDir"));
assertEquals(status.getOwner(),user.getUserName());
assertEquals(status.getGroup(),user.getGroupNames()[0]);
// Internal dirs carry the fixed r-xr-xr-x permission set.
assertEquals(status.getEntries(),AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(status.isStickyBit());
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * List a mount-point target directory as a file and then a
 * subdirectory are added, checking counts, types and lengths at each
 * step.
 */
@Test public void testListOnMountTargetDirs() throws IOException {
FileStatus[] dirPaths=fsView.listStatus(new Path("/data"));
FileStatus fs;
// Initially empty.
Assert.assertEquals(0,dirPaths.length);
long len=fileSystemTestHelper.createFile(fsView,"/data/foo");
dirPaths=fsView.listStatus(new Path("/data"));
Assert.assertEquals(1,dirPaths.length);
fs=fileSystemTestHelper.containsPath(fsView,"/data/foo",dirPaths);
Assert.assertNotNull(fs);
// Fixed typo in the assertion message ("shoudl" -> "should").
Assert.assertTrue("Created file should appear as a file",fs.isFile());
Assert.assertEquals(len,fs.getLen());
// Add a directory alongside the file.
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView,"/data/dirX"));
dirPaths=fsView.listStatus(new Path("/data"));
Assert.assertEquals(2,dirPaths.length);
fs=fileSystemTestHelper.containsPath(fsView,"/data/foo",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("Created file should appear as a file",fs.isFile());
fs=fileSystemTestHelper.containsPath(fsView,"/data/dirX",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("Created dir should appear as a dir",fs.isDirectory());
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test "readOps" (e.g. list, listStatus)
 * on internal dirs of mount table.
 * These operations should succeed.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
FileStatus[] dirPaths=fsView.listStatus(new Path("/"));
FileStatus fs;
verifyRootChildren(dirPaths);
// Listing an internal dir: one nested internal dir, one mount link.
dirPaths=fsView.listStatus(new Path("/internalDir"));
Assert.assertEquals(2,dirPaths.length);
fs=fileSystemTestHelper.containsPath(fsView,"/internalDir/internalDir2",dirPaths);
Assert.assertNotNull(fs);
// Fixed message: this assertion checks isDirectory() on an internal
// dir, so the old "A mount should appear as symlink" text was wrong
// (compare the FileContext variant of this test).
Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory());
fs=fileSystemTestHelper.containsPath(fsView,"/internalDir/linkToDir2",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
}
Class: org.apache.hadoop.fs.viewfs.ViewFsBaseTest
BooleanVerifierExceptionVerifierHybridVerifier
/**
 * Deleting a mount link inside the internal mount tree must be
 * rejected with an AccessControlException.
 */
@Test(expected=AccessControlException.class) public void testInternalDeleteExisting2() throws IOException {
Path linkPath=new Path("/internalDir/linkToDir2");
// Precondition: the link resolves to a directory.
Assert.assertTrue("Delete of link to dir should succeed",fcView.getFileStatus(linkPath).isDirectory());
fcView.delete(linkPath,false);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * getAclStatus on an internal dir (via FileContext) must report the
 * current user and group, the minimal 555 ACL, and no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation user=UserGroupInformation.getCurrentUser();
AclStatus status=fcView.getAclStatus(new Path("/internalDir"));
assertEquals(status.getOwner(),user.getUserName());
assertEquals(status.getGroup(),user.getGroupNames()[0]);
// Internal dirs carry the fixed r-xr-xr-x permission set.
assertEquals(status.getEntries(),AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(status.isStickyBit());
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Check the FileStatus of the root, each mount link, and the internal
 * dirs; a dangling link must surface as FileNotFoundException.
 */
@Test public void testFileStatusOnMountLink() throws IOException {
Assert.assertTrue("Slash should appear as dir",fcView.getFileStatus(new Path("/")).isDirectory());
checkFileStatus(fcView,"/",fileType.isDir);
checkFileStatus(fcView,"/user",fileType.isDir);
checkFileStatus(fcView,"/data",fileType.isDir);
checkFileStatus(fcView,"/internalDir",fileType.isDir);
checkFileStatus(fcView,"/internalDir/linkToDir2",fileType.isDir);
checkFileStatus(fcView,"/internalDir/internalDir2/linkToDir3",fileType.isDir);
checkFileStatus(fcView,"/linkToAFile",fileType.isFile);
try {
fcView.getFileStatus(new Path("/danglingLink"));
// Fixed typo: "Excepted" -> "Expected".
Assert.fail("Expected a not found exception here");
}
catch ( FileNotFoundException e) {
// expected: the link target does not exist
}
}
BooleanVerifierExceptionVerifierHybridVerifier
/**
 * Renaming a mount link inside the internal mount tree must be
 * rejected with an AccessControlException.
 */
@Test(expected=AccessControlException.class) public void testInternalRename2() throws IOException {
Path link=new Path("/internalDir/linkToDir2");
// Precondition: the link resolves to a directory.
Assert.assertTrue("linkTODir2 should be a dir",fcView.getFileStatus(link).isDirectory());
fcView.rename(link,new Path("/internalDir/dir1"));
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test "readOps" (e.g. list, listStatus)
 * on internal dirs of mount table.
 * These operations should succeed.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
FileStatus[] listing=fcView.util().listStatus(new Path("/"));
Assert.assertEquals(7,listing.length);
// Every mount link under the root must surface as a symlink.
for (String link : new String[]{"/user","/data","/danglingLink","/linkToAFile"}) {
FileStatus entry=fileContextTestHelper.containsPath(fcView,link,listing);
Assert.assertNotNull(entry);
Assert.assertTrue("A mount should appear as symlink",entry.isSymlink());
}
// The internal dir itself is a directory, not a link.
FileStatus internal=fileContextTestHelper.containsPath(fcView,"/internalDir",listing);
Assert.assertNotNull(internal);
Assert.assertTrue("InternalDirs should appear as dir",internal.isDirectory());
// Listing the internal dir: one nested internal dir, one mount link.
listing=fcView.util().listStatus(new Path("/internalDir"));
Assert.assertEquals(2,listing.length);
FileStatus nested=fileContextTestHelper.containsPath(fcView,"/internalDir/internalDir2",listing);
Assert.assertNotNull(nested);
Assert.assertTrue("InternalDirs should appear as dir",nested.isDirectory());
FileStatus link2=fileContextTestHelper.containsPath(fcView,"/internalDir/linkToDir2",listing);
Assert.assertNotNull(link2);
Assert.assertTrue("A mount should appear as symlink",link2.isSymlink());
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Verify that getActiveData returns the lock node's payload when an
 * active exists, translates NoNodeException into
 * ActiveNotFoundException when it does not, and propagates any other
 * ZooKeeper error unchanged.
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws ActiveNotFoundException
 */
@Test public void testGetActiveData() throws ActiveNotFoundException, KeeperException, InterruptedException, IOException {
// Case 1: the lock znode exists -> its payload is returned verbatim.
byte[] data=new byte[8];
Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenReturn(data);
Assert.assertEquals(data,elector.getActiveData());
Mockito.verify(mockZK,Mockito.times(1)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject());
// Case 2: no active znode -> NoNodeException must be translated into
// ActiveNotFoundException.
Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenThrow(new KeeperException.NoNodeException());
try {
elector.getActiveData();
Assert.fail("ActiveNotFoundException expected");
}
catch ( ActiveNotFoundException e) {
Mockito.verify(mockZK,Mockito.times(2)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject());
}
// Case 3: any other ZooKeeper error (here AuthFailedException) must
// propagate unchanged to the caller.
try {
Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenThrow(new KeeperException.AuthFailedException());
elector.getActiveData();
Assert.fail("KeeperException.AuthFailedException expected");
}
catch ( KeeperException.AuthFailedException ke) {
Mockito.verify(mockZK,Mockito.times(3)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject());
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that, when the callback fails to enter active state,
 * the elector rejoins the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActive() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
// The elector has not had to back off yet.
Assert.assertEquals(0,elector.sleptFor);
// Make the application callback reject the transition to active.
Mockito.doThrow(new ServiceFailedException("failed to become active")).when(mockApp).becomeActive();
// Deliver a successful create result: the elector attempts to become
// active and the callback throws.
elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
Mockito.verify(mockApp).becomeActive();
// A second create() call shows the elector re-joined the election.
Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
Assert.assertEquals(2,count);
// The rejoin must have been preceded by a short sleep.
Assert.assertTrue(elector.sleptFor > 0);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that, when the callback fails to enter active state after
 * a ZK disconnect (i.e. from the StatCallback), the elector rejoins
 * the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActiveAfterZKDisconnect() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
Assert.assertEquals(0,elector.sleptFor);
// Simulate a connection loss on the first create; the elector retries
// the create (hence times(2) below).
elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
// The retry reports NODEEXISTS, so the elector checks the node's stat.
elector.processResult(Code.NODEEXISTS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
verifyExistCall(1);
// Stat shows the node is owned by our own session (id 1)...
Stat stat=new Stat();
stat.setEphemeralOwner(1L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
// ...but the application refuses to become active.
Mockito.doThrow(new ServiceFailedException("fail to become active")).when(mockApp).becomeActive();
elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,stat);
Mockito.verify(mockApp,Mockito.times(1)).becomeActive();
// A third create() call shows the elector rejoined the election.
Mockito.verify(mockZK,Mockito.times(3)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
Assert.assertEquals(2,count);
// The rejoin must have been preceded by a short back-off sleep.
Assert.assertTrue(elector.sleptFor > 0);
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Graceful failover to a node that fails to become active: the RPC
 * must fail with a ServiceFailedException naming the target and the
 * injected failure, neither node may be fenced, and the original node
 * must hold the active lock again afterwards.
 */
@Test(timeout=15000) public void testGracefulFailoverFailBecomingActive() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Inject a failure into service 1's transition to active.
cluster.setFailToBecomeActive(1,true);
try {
cluster.getService(1).getZKFCProxy(conf,5000).gracefulFailover();
fail("Did not fail to graceful failover when target failed " + "to become active!");
}
catch ( ServiceFailedException sfe) {
// The exception must identify both the target and the injected cause.
GenericTestUtils.assertExceptionContains("Couldn't make " + cluster.getService(1) + " active",sfe);
GenericTestUtils.assertExceptionContains("injected failure",sfe);
}
// Neither node should have been fenced by the failed attempt.
assertEquals(0,cluster.getService(0).fenceCount);
assertEquals(0,cluster.getService(1).fenceCount);
// Service 0 holds the active lock again.
cluster.waitForActiveLockHolder(0);
}
finally {
cluster.stop();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that the ZKFC can gracefully cede its active status: the RPC
 * returns quickly, the elector drops to INIT, and the node rejoins the
 * election only after the requested cede period has elapsed.
 */
@Test(timeout=15000) public void testCedeActive() throws Exception {
try {
cluster.start();
DummyZKFC zkfc=cluster.getZkfc(0);
// Node 0 starts out as the active.
assertEquals(ActiveStandbyElector.State.ACTIVE,zkfc.getElectorForTests().getStateForTests());
ZKFCProtocol proxy=zkfc.getLocalTarget().getZKFCProxy(conf,5000);
// Ask it to cede for 3 seconds; the RPC itself must not block.
long cedeStart=Time.now();
proxy.cedeActive(3000);
long cedeEnd=Time.now();
assertTrue("RPC to cedeActive took " + (cedeEnd - cedeStart) + " ms",cedeEnd - cedeStart < 1000);
assertEquals(ActiveStandbyElector.State.INIT,zkfc.getElectorForTests().getStateForTests());
// The node sits out of the election until the cede period expires.
cluster.waitForElectorState(0,ActiveStandbyElector.State.STANDBY);
long rejoinEnd=Time.now();
assertTrue("Should take ~3 seconds to rejoin. Only took " + (rejoinEnd - cedeEnd) + "ms before rejoining.",rejoinEnd - cedeEnd > 2800);
}
finally {
cluster.stop();
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that, if ACLs are specified in the configuration, formatting
 * the parent znode applies them: a client without credentials must be
 * denied read access to the formatted node.
 */
@Test(timeout=15000) public void testFormatSetsAcls() throws Exception {
// Format ZK through the failover controller CLI.
DummyHAService service=cluster.getService(1);
assertEquals(0,runFC(service,"-formatZK"));
ZooKeeper unauthenticatedClient=createClient();
try {
Stat stat=new Stat();
unauthenticatedClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,false,stat);
fail("Was able to read data without authenticating!");
}
catch ( KeeperException.NoAuthException nae) {
// expected: the ACLs deny unauthenticated reads
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Test the case where we have a failure to complete a short circuit read
 * that occurs, and then later on, we have a success.
 * Any thread waiting on a cache load should receive the failure (if it
 * occurs); however, the failure result should not be cached. We want
 * to be able to retry later and succeed.
 */
@Test(timeout=60000) public void testShortCircuitCacheTemporaryFailure() throws Exception {
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
// Gate controlling whether replica creation fails; flipped to false
// once every reader thread has observed the initial failure.
final AtomicBoolean replicaCreationShouldFail=new AtomicBoolean(true);
final AtomicBoolean testFailed=new AtomicBoolean(false);
// Force the short-circuit path: no TCP fallback during this test.
DFSInputStream.tcpReadsDisabledForTesting=true;
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
if (replicaCreationShouldFail.get()) {
// Sleep before returning the failure — presumably to widen the
// window in which other threads wait on this load; TODO confirm.
Uninterruptibles.sleepUninterruptibly(2,TimeUnit.SECONDS);
return new ShortCircuitReplicaInfo();
}
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testShortCircuitCacheTemporaryFailure",sockDir);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int NUM_THREADS=2;
final int SEED=0xFADED;
// Latches sequencing the two phases: all threads observe the failure,
// then all retry once the gate has been opened.
final CountDownLatch gotFailureLatch=new CountDownLatch(NUM_THREADS);
final CountDownLatch shouldRetryLatch=new CountDownLatch(1);
DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
List locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks();
LocatedBlock lblock=locatedBlocks.get(0);
BlockReader blockReader=null;
try {
// Phase 1: replica creation fails and TCP reads are disabled,
// so getBlockReader must fail.
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
Assert.fail("expected getBlockReader to fail the first time.");
}
catch ( Throwable t) {
Assert.assertTrue("expected to see 'TCP reads were disabled " + "for testing' in exception " + t,t.getMessage().contains("TCP reads were disabled for testing"));
}
finally {
if (blockReader != null) blockReader.close();
}
gotFailureLatch.countDown();
shouldRetryLatch.await();
try {
// Phase 2: the failure must not have been cached, so the retry
// succeeds.
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
}
catch ( Throwable t) {
LOG.error("error trying to retrieve a block reader " + "the second time.",t);
throw t;
}
finally {
if (blockReader != null) blockReader.close();
}
}
catch ( Throwable t) {
// Record failures for the main thread; asserts inside worker
// threads do not fail the test on their own.
LOG.error("getBlockReader failure",t);
testFailed.set(true);
}
}
}
;
Thread threads[]=new Thread[NUM_THREADS];
for (int i=0; i < NUM_THREADS; i++) {
threads[i]=new Thread(readerRunnable);
threads[i].start();
}
// Wait until every thread has hit the failure, then open the gate.
gotFailureLatch.await();
replicaCreationShouldFail.set(false);
shouldRetryLatch.countDown();
for (int i=0; i < NUM_THREADS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
cluster.shutdown();
sockDir.close();
Assert.assertFalse(testFailed.get());
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception. This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal. So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open. If not, we should purge the replica to avoid giving
 * it out to any future readers.
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout=120000) public void testPurgingClosedReplicas() throws Exception {
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
// Counts short-circuit replica creations; a purged replica forces a
// second creation (asserted to be exactly 2 at the end).
final AtomicInteger replicasCreated=new AtomicInteger(0);
final AtomicBoolean testFailed=new AtomicBoolean(false);
DFSInputStream.tcpReadsDisabledForTesting=true;
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
replicasCreated.incrementAndGet();
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testPurgingClosedReplicas",sockDir);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4095;
final int SEED=0xFADE0;
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),conf);
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// Semaphore handshake between the reader thread and the interrupting
// main thread below.
final Semaphore sem=new Semaphore(0);
final List locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks();
final LocatedBlock lblock=locatedBlocks.get(0);
final byte[] buf=new byte[TEST_FILE_LEN];
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
// Keep re-reading the block until an interrupt closes the
// underlying FileChannel mid-read.
while (true) {
BlockReader blockReader=null;
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
sem.release();
try {
blockReader.readAll(buf,0,TEST_FILE_LEN);
}
finally {
sem.acquireUninterruptibly();
}
}
catch ( ClosedByInterruptException e) {
// The interrupt closed the channel during a read: this is the
// expected exit path of the loop.
LOG.info("got the expected ClosedByInterruptException",e);
sem.release();
break;
}
finally {
if (blockReader != null) blockReader.close();
}
LOG.info("read another " + TEST_FILE_LEN + " bytes.");
}
}
catch ( Throwable t) {
// Record failures for the main thread to assert on.
LOG.error("getBlockReader failure",t);
testFailed.set(true);
sem.release();
}
}
}
;
Thread thread=new Thread(readerRunnable);
thread.start();
// Repeatedly interrupt the reader until it exits via
// ClosedByInterruptException.
while (thread.isAlive()) {
sem.acquireUninterruptibly();
thread.interrupt();
sem.release();
}
Assert.assertFalse(testFailed.get());
// The closed replica must have been purged from the cache: a fresh
// read succeeds and returns the correct bytes.
BlockReader blockReader=null;
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
blockReader.readFully(buf,0,TEST_FILE_LEN);
}
finally {
if (blockReader != null) blockReader.close();
}
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(buf,expected));
// Exactly two creations: the original replica plus the replacement
// created after the purge.
Assert.assertEquals(2,replicasCreated.get());
dfs.close();
cluster.shutdown();
sockDir.close();
}
TestInitializerAssumptionSetterHybridVerifier
/**
 * Per-test setup: relax domain-socket bind-path validation and skip
 * the test when native domain-socket support failed to load.
 */
@Before public void init(){
DomainSocket.disableBindPathValidation();
// Skip (rather than fail) when the native library is unavailable.
Assume.assumeThat(DomainSocket.getLoadingFailureReason(),equalTo(null));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that a client which supports short-circuit reads using
 * shared memory can fall back to not using shared memory when
 * the server doesn't support it.
 */
@Test public void testShortCircuitReadFromServerWithoutShm() throws Exception {
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration clientConf=createShortCircuitConf("testShortCircuitReadFromServerWithoutShm",sockDir);
Configuration serverConf=new Configuration(clientConf);
// Setting the watcher interval to 0 disables server-side shared
// memory support — NOTE(review): inferred from the key name; confirm.
serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0);
DFSInputStream.tcpReadsDisabledForTesting=true;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
cluster.waitActive();
// Use a dedicated client context so cached state from other tests
// cannot leak into this one.
clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromServerWithoutShm_clientContext");
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf);
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADEC;
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// The read must still return the correct bytes despite the fallback.
byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE));
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache();
final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
// Shared memory must be marked disabled for this datanode, with no
// shm segments allocated on the client.
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertEquals(1,info.size());
PerDatanodeVisitorInfo vinfo=info.get(datanode);
Assert.assertTrue(vinfo.disabled);
Assert.assertEquals(0,vinfo.full.size());
Assert.assertEquals(0,vinfo.notFull.size());
}
}
);
cluster.shutdown();
}
APIUtilityVerifierBranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo. This one replica should be shared
 * by all threads.
 */
@Test(timeout=60000) public void testMultipleWaitersOnShortCircuitCache() throws Exception {
// Latch holding the single creator callback until all readers have
// queued up behind the in-flight cache load.
final CountDownLatch latch=new CountDownLatch(1);
final AtomicBoolean creationIsBlocked=new AtomicBoolean(true);
final AtomicBoolean testFailed=new AtomicBoolean(false);
DFSInputStream.tcpReadsDisabledForTesting=true;
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
Uninterruptibles.awaitUninterruptibly(latch);
// compareAndSet succeeds only for the first caller: a second call
// means the cache failed to share the in-flight load.
if (!creationIsBlocked.compareAndSet(true,false)) {
Assert.fail("there were multiple calls to " + "createShortCircuitReplicaInfo. Only one was expected.");
}
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testMultipleWaitersOnShortCircuitCache",sockDir);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADED;
final int NUM_THREADS=10;
DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
byte contents[]=DFSTestUtil.readFileBuffer(dfs,new Path(TEST_FILE));
// By the time any read completes, the creator has flipped the flag.
Assert.assertFalse(creationIsBlocked.get());
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
}
catch ( Throwable e) {
// Record failures for the main thread; asserts in worker threads
// do not fail the test on their own.
LOG.error("readerRunnable error",e);
testFailed.set(true);
}
}
}
;
Thread threads[]=new Thread[NUM_THREADS];
for (int i=0; i < NUM_THREADS; i++) {
threads[i]=new Thread(readerRunnable);
threads[i].start();
}
// Give all readers time to block on the cache load, then let the
// single creation proceed.
Thread.sleep(500);
latch.countDown();
for (int i=0; i < NUM_THREADS; i++) {
Uninterruptibles.joinUninterruptibly(threads[i]);
}
cluster.shutdown();
sockDir.close();
Assert.assertFalse(testFailed.get());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test that a client which does not support short-circuit reads using
 * shared memory can talk with a server which supports it.
 */
@Test public void testShortCircuitReadFromClientWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  // NOTE(review): the conf name below differs from this test's method name;
  // kept as-is since it only selects the socket path.
  Configuration clientConf = createShortCircuitConf("testShortCircuitReadWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    // Setting the watcher interrupt check to 0 on the CLIENT side makes the
    // client report that it does not support shared-memory segments.
    clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
    clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromClientWithoutShm_clientContext");
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    // With shm disabled on the client, no shm manager should exist at all.
    final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
    Assert.assertEquals(null, cache.getDfsClientShmManager());
  } finally {
    // Fix: the original never closed sockDir and did not shut the cluster
    // down when an assertion failed; do both unconditionally.
    cluster.shutdown();
    sockDir.close();
  }
}
AssumptionSetterEqualityVerifierHybridVerifier
/**
 * Verifies that a file written and read back is intact when both the old and
 * the new short-circuit read configurations are present.
 */
@Test public void testBothOldAndNewShortCircuitConfigured() throws Exception {
  final short REPL_FACTOR = 1;
  final int FILE_LENGTH = 512;
  // Short-circuit reads need domain sockets; skip on platforms without them.
  Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
  TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = getConfiguration(socketDir);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    // The socket directory is closed early, as in the original test —
    // presumably to show reads work without it; TODO confirm intent.
    socketDir.close();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/foo");
    byte orig[] = new byte[FILE_LENGTH];
    for (int i = 0; i < orig.length; i++) {
      orig[i] = (byte) (i % 10);
    }
    FSDataOutputStream fos = fs.create(path, (short) 1);
    fos.write(orig);
    fos.close();
    DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
    FSDataInputStream fis = cluster.getFileSystem().open(path);
    byte buf[] = new byte[FILE_LENGTH];
    IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
    fis.close();
    Assert.assertArrayEquals(orig, buf);
    // Fix: removed a dead "Arrays.equals(orig, buf);" statement whose result
    // was ignored and which merely duplicated the assertion above.
  } finally {
    // Fix: shut the cluster down even when an assertion fails.
    cluster.shutdown();
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that, in the case of an error, the position and limit of a ByteBuffer
 * are left unchanged. This is not mandated by ByteBufferReadable, but clients
 * of this class might immediately issue a retry on failure, so it's polite.
 */
@Test public void testStablePositionAfterCorruptRead() throws Exception {
  final short REPL_FACTOR = 1;
  final long FILE_LENGTH = 512L;
  HdfsConfiguration conf = getConfiguration(null);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/corrupted");
    DFSTestUtil.createFile(fs, path, FILE_LENGTH, REPL_FACTOR, 12345L);
    DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
    // Case 1: fresh buffer — position/limit must be untouched by the failed read.
    FSDataInputStream dis = cluster.getFileSystem().open(path);
    ByteBuffer buf = ByteBuffer.allocateDirect((int) FILE_LENGTH);
    boolean sawException = false;
    try {
      dis.read(buf);
    } catch (ChecksumException ex) {
      sawException = true;
    } finally {
      dis.close(); // fix: the original leaked this stream when reopening below
    }
    assertTrue(sawException);
    assertEquals(0, buf.position());
    assertEquals(buf.capacity(), buf.limit());
    // Case 2: non-zero position and a smaller limit — both must survive the failure.
    dis = cluster.getFileSystem().open(path);
    buf.position(3);
    buf.limit(25);
    sawException = false;
    try {
      dis.read(buf);
    } catch (ChecksumException ex) {
      sawException = true;
    } finally {
      dis.close(); // fix: close the second stream too
    }
    assertTrue(sawException);
    assertEquals(3, buf.position());
    assertEquals(25, buf.limit());
  } finally {
    // Fix: shut the cluster down even when an assertion fails.
    cluster.shutdown();
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
  // Fix: JUnit's assertEquals takes (expected, actual) — the original had
  // them reversed, which produces misleading failure messages.
  assertEquals(FILE_LENGTH_TO_VERIFY, fs.getFileStatus(TEST_FILE).getLen());
  // Kill the active NN and promote the standby; the client must fail over
  // transparently and still see the file.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(FILE_LENGTH_TO_VERIFY, fs.getFileStatus(TEST_FILE).getLen());
  // A logical URI with an explicit port must also resolve through failover.
  Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster) + ":" + NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  try {
    assertTrue(fs2.exists(withPort));
  } finally {
    fs2.close(); // fix: the original never closed this second FileSystem
    fs.close();
  }
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Make sure that a helpful error message is shown if a proxy provider is
 * configured for a given URI, but no actual addresses are configured for that
 * URI.
 */
@Test public void testFailureWithMisconfiguredHaNNs() throws Exception {
  final String logicalHost = "misconfigured-ha-uri";
  final Configuration conf = new Configuration();
  // Register a failover proxy provider for the logical host, but deliberately
  // configure no namenode addresses for it.
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHost, ConfiguredFailoverProxyProvider.class.getName());
  final URI uri = new URI("hdfs://" + logicalHost + "/test");
  try {
    // Any filesystem operation should fail while resolving the proxy.
    FileSystem.get(uri, conf).exists(new Path("/test"));
    fail("Successfully got proxy provider for misconfigured FS");
  } catch (IOException ioe) {
    LOG.info("got expected exception", ioe);
    final String trace = StringUtils.stringifyException(ioe);
    assertTrue("expected exception did not contain helpful message", trace.contains("Could not find any configured addresses for URI " + uri));
  }
}
APIUtilityVerifierIterativeVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that checksum failures are recovered from by the next read on the same
 * DFSInputStream. Corruption information is not persisted from read call to
 * read call, so the client should expect consecutive calls to behave the same
 * way. See HDFS-3067.
 */
@Test public void testRetryOnChecksumFailure() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    final short REPL_FACTOR = 1;
    final long FILE_LENGTH = 512L;
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/corrupted");
    DFSTestUtil.createFile(fs, path, FILE_LENGTH, REPL_FACTOR, 12345L);
    DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
    InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(nnAddr, conf);
    try {
      DFSInputStream dis = client.open(path.toString());
      try {
        byte[] arr = new byte[(int) FILE_LENGTH];
        // Two consecutive reads on the SAME stream: corruption state must not
        // carry over between calls, so both must fail the same way.
        for (int i = 0; i < 2; ++i) {
          try {
            dis.read(arr, 0, (int) FILE_LENGTH);
            fail("Expected ChecksumException not thrown");
          } catch (Exception ex) {
            GenericTestUtils.assertExceptionContains("Checksum error", ex);
          }
        }
      } finally {
        dis.close(); // fix: the input stream was never closed
      }
    } finally {
      client.close(); // fix: the DFSClient was never closed
    }
  } finally {
    cluster.shutdown();
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that getFileChecksum() still works, and returns an equal checksum,
 * after the datanode holding the first replica of the file's first block is
 * stopped — i.e. the checksum is recomputed from the remaining replicas.
 */
@Test public void testGetFileChecksum() throws Exception {
final String f="/testGetFileChecksum";
final Path p=new Path(f);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
final FileSystem fs=cluster.getFileSystem();
// 1 MB file with replication factor 3 (one replica per datanode).
DFSTestUtil.createFile(fs,p,1L << 20,(short)3,20100402L);
final FileChecksum cs1=fs.getFileChecksum(p);
assertTrue(cs1 != null);
// Locate the first replica of the first block and stop its datanode.
final List locatedblocks=DFSClient.callGetBlockLocations(cluster.getNameNodeRpc(),f,0,Long.MAX_VALUE).getLocatedBlocks();
final DatanodeInfo first=locatedblocks.get(0).getLocations()[0];
cluster.stopDataNode(first.getXferAddr());
// Checksum computed from the surviving replicas must match the original.
final FileChecksum cs2=fs.getFileChecksum(p);
assertEquals(cs1,cs2);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * This tests that DFSInputStream failures are counted for a given read
 * operation, and not over the lifetime of the stream. It is a regression
 * test for HDFS-127.
 */
@Test public void testFailuresArePerOperation() throws Exception {
long fileSize=4096;
Path file=new Path("/testFile");
// Shrink retry window and socket timeout so the failure paths run quickly.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,2 * 1000);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Spy on the NN RPC so getBlockLocations can be made to fail N times.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient client=new DFSClient(null,spyNN,conf,null);
int maxBlockAcquires=client.getMaxBlockAcquireFailures();
assertTrue(maxBlockAcquires > 0);
DFSTestUtil.createFile(fs,file,fileSize,(short)1,12345L);
// One failure more than the client tolerates: the read must give up.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires + 1)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
try {
IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true);
fail("Didn't get exception");
}
catch ( IOException ioe) {
DFSClient.LOG.info("Got expected exception",ioe);
}
// Exactly the tolerated number of failures: the read must succeed.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true);
DFSClient.LOG.info("Starting test case for failure reset");
// First read on a fresh stream, again with the maximum tolerated failures.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
DFSInputStream is=client.open(file.toString());
byte buf[]=new byte[10];
IOUtils.readFully(is,buf,0,buf.length);
DFSClient.LOG.info("First read successful after some failures.");
// Re-arm the failing answer: a SECOND read on the SAME stream must again
// tolerate up to maxBlockAcquires failures, proving the count is per
// operation, not per stream.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
is.openInfo();
is.seek(0);
IOUtils.readFully(is,buf,0,buf.length);
}
finally {
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestDFSRemove
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Creates a batch of files, deletes them all, and verifies that disk usage
 * returns to its starting value once the datanodes have heartbeated.
 */
@Test public void testRemove() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdirs(dir));
    final long dfsUsedStart = getTotalDfsUsed(cluster);
    final int fileCount = 100;
    // Create all the files and record peak usage.
    for (int i = 0; i < fileCount; i++) {
      createFile(fs, new Path(dir, "a" + i));
    }
    final long dfsUsedMax = getTotalDfsUsed(cluster);
    // Delete every file again.
    for (int i = 0; i < fileCount; i++) {
      fs.delete(new Path(dir, "a" + i), false);
    }
    // Allow a few heartbeat intervals for the freed blocks to be reported.
    Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
    final long dfsUsedFinal = getTotalDfsUsed(cluster);
    assertEquals("All blocks should be gone. start=" + dfsUsedStart + " max=" + dfsUsedMax + " final=" + dfsUsedFinal, dfsUsedStart, dfsUsedFinal);
    fs.delete(dir, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test to make sure that user namespace xattrs can be set only if path has
 * access and for sticky directories, only owner/privileged user can write.
 * Trusted namespace xattrs can be set only with privileged users.
 * As user1: Create a directory (/foo) as user1, chown it to user1 (and
 * user1's group), grant rwx to "other".
 * As user2: Set an xattr (should pass with path access).
 * As user1: Set an xattr (should pass).
 * As user2: Read the xattr (should pass). Remove the xattr (should pass with
 * path access).
 * As user1: Read the xattr (should pass). Remove the xattr (should pass).
 * As user1: Change permissions only to owner
 * As User2: Set an Xattr (Should fail set with no path access) Remove an
 * Xattr (Should fail with no path access)
 * As SuperUser: Set an Xattr with Trusted (Should pass)
 */
@Test(timeout=30000) public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
  final String USER1 = "user1";
  final String GROUP1 = "supergroup";
  final UserGroupInformation user1 = UserGroupInformation.createUserForTesting(USER1, new String[]{GROUP1});
  final UserGroupInformation user2 = UserGroupInformation.createUserForTesting("user2", new String[]{"mygroup2"});
  final UserGroupInformation SUPERUSER = UserGroupInformation.getCurrentUser();
  MiniDFSCluster cluster = null;
  PrintStream bak = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final FileSystem fs = cluster.getFileSystem();
    fs.setOwner(new Path("/"), USER1, GROUP1);
    // Capture stderr so the shell's error output can be asserted on.
    bak = System.err;
    final FsShell fshell = new FsShell(conf);
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setErr(new PrintStream(out));
    // user1: create /foo.
    user1.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        final int ret = ToolRunner.run(fshell, new String[]{"-mkdir", "/foo"});
        assertEquals("Return should be 0", 0, ret);
        out.reset();
        return null;
      }
    });
    // user1: grant rwx to "other" so user2 gets path access.
    user1.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        final int ret = ToolRunner.run(fshell, new String[]{"-chmod", "707", "/foo"});
        assertEquals("Return should be 0", 0, ret);
        out.reset();
        return null;
      }
    });
    // user2: set a user-namespace xattr — allowed via path access.
    user2.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        final int ret = ToolRunner.run(fshell, new String[]{"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
        assertEquals("Returned should be 0", 0, ret);
        out.reset();
        return null;
      }
    });
    // user1 (owner): set the same xattr — allowed.
    user1.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        final int ret = ToolRunner.run(fshell, new String[]{"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
        assertEquals("Returned should be 0", 0, ret);
        out.reset();
        return null;
      }
    });
    // user2: read and then remove the xattr — both allowed via path access.
    user2.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        int ret = ToolRunner.run(fshell, new String[]{"-getfattr", "-n", "user.a1", "/foo"});
        assertEquals("Returned should be 0", 0, ret);
        out.reset();
        ret = ToolRunner.run(fshell, new String[]{"-setfattr", "-x", "user.a1", "/foo"});
        assertEquals("Returned should be 0", 0, ret);
        out.reset();
        return null;
      }
    });
    // Fix: removed a no-op user1.doAs(...) block whose run() did nothing.
    // user1: restrict the directory to owner-only access.
    user1.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        final int ret = ToolRunner.run(fshell, new String[]{"-chmod", "700", "/foo"});
        assertEquals("Return should be 0", 0, ret);
        out.reset();
        return null;
      }
    });
    // user2: setting an xattr must now fail with "Permission denied".
    user2.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        int ret = ToolRunner.run(fshell, new String[]{"-setfattr", "-n", "user.a2", "/foo"});
        assertEquals("Returned should be 1", 1, ret);
        final String str = out.toString();
        assertTrue("Permission denied printed", str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    });
    // user2: removing an xattr must also fail with "Permission denied".
    user2.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        int ret = ToolRunner.run(fshell, new String[]{"-setfattr", "-x", "user.a2", "/foo"});
        assertEquals("Returned should be 1", 1, ret);
        final String str = out.toString();
        assertTrue("Permission denied printed", str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    });
    // superuser: trusted-namespace xattrs are allowed for privileged users.
    SUPERUSER.doAs(new PrivilegedExceptionAction() {
      @Override public Object run() throws Exception {
        int ret = ToolRunner.run(fshell, new String[]{"-setfattr", "-n", "trusted.a3", "/foo"});
        assertEquals("Returned should be 0", 0, ret);
        out.reset();
        return null;
      }
    });
  } finally {
    // Restore stderr and tear the cluster down regardless of outcome.
    if (bak != null) {
      System.setErr(bak);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the "-cp" preserve flags on a DIRECTORY copy: -p and -ptop keep
 * timestamps/ownership/permission only; adding 'x' also preserves xattrs;
 * adding 'a' also preserves ACL entries.
 */
@Test(timeout=120000) public void testCopyCommandsToDirectoryWithPreserveOption() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-" + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    Path srcDir = new Path(hdfsTestDir, "srcDir");
    fs.mkdirs(srcDir);
    fs.setAcl(srcDir, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
    // Sticky bit is set so permission preservation must carry it too.
    fs.setPermission(srcDir, new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
    Path srcFile = new Path(srcDir, "srcFile");
    fs.create(srcFile).close();
    FileStatus status = fs.getFileStatus(srcDir);
    final long mtime = status.getModificationTime();
    final long atime = status.getAccessTime();
    final String owner = status.getOwner();
    final String group = status.getGroup();
    final FsPermission perm = status.getPermission();
    fs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
    fs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);
    shell = new FsShell(conf);
    // -p: timestamps/ownership/permission preserved; no xattrs, no ACLs.
    Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
    String[] argv = new String[]{"-cp", "-p", srcDir.toUri().toString(), targetDir1.toUri().toString()};
    int ret = ToolRunner.run(shell, argv);
    assertEquals("cp -p is not working", SUCCESS, ret);
    FsPermission targetPerm = checkPreservedDirStatus(fs, targetDir1, mtime, atime, owner, group, perm);
    Map xattrs = fs.getXAttrs(targetDir1);
    assertTrue(xattrs.isEmpty());
    List acls = fs.getAclStatus(targetDir1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptop: same preservation set, spelled out explicitly.
    Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
    argv = new String[]{"-cp", "-ptop", srcDir.toUri().toString(), targetDir2.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptop is not working", SUCCESS, ret);
    targetPerm = checkPreservedDirStatus(fs, targetDir2, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(targetDir2);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopx: xattrs are preserved as well.
    Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
    argv = new String[]{"-cp", "-ptopx", srcDir.toUri().toString(), targetDir3.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopx is not working", SUCCESS, ret);
    targetPerm = checkPreservedDirStatus(fs, targetDir3, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(targetDir3);
    assertEquals(xattrs.size(), 2);
    assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
    acls = fs.getAclStatus(targetDir3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopa: ACL entries are preserved as well.
    Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
    argv = new String[]{"-cp", "-ptopa", srcDir.toUri().toString(), targetDir4.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopa is not working", SUCCESS, ret);
    targetPerm = checkPreservedDirStatus(fs, targetDir4, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(targetDir4);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir4));
    // -ptoa: ACL preservation without the explicit permission letter.
    Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
    argv = new String[]{"-cp", "-ptoa", srcDir.toUri().toString(), targetDir5.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptoa is not working", SUCCESS, ret);
    targetPerm = checkPreservedDirStatus(fs, targetDir5, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(targetDir5);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(targetDir5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(srcDir), fs.getAclStatus(targetDir5));
  } finally {
    if (shell != null) {
      shell.close();
    }
    if (fs != null) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}

/**
 * Asserts that mtime/atime/owner/group/permission of {@code target} equal the
 * captured source values; returns the target's permission for further checks.
 * (Extracted from five identical inline sequences in the test above.)
 */
private static FsPermission checkPreservedDirStatus(FileSystem fs, Path target, long mtime, long atime, String owner, String group, FsPermission perm) throws Exception {
  FileStatus targetStatus = fs.getFileStatus(target);
  assertEquals(mtime, targetStatus.getModificationTime());
  assertEquals(atime, targetStatus.getAccessTime());
  assertEquals(owner, targetStatus.getOwner());
  assertEquals(group, targetStatus.getGroup());
  FsPermission targetPerm = targetStatus.getPermission();
  assertTrue(perm.equals(targetPerm));
  return targetPerm;
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Checks that "-cp -p" preserves timestamps, ownership and permission (but
 * not ACL entries) for a file with an ACL and sticky bit, while "-cp -ptopa"
 * also carries the ACL entries over to the copy.
 */
@Test(timeout=120000) public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-" + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    final Path src = new Path(hdfsTestDir, "srcfile");
    fs.create(src).close();
    fs.setAcl(src, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
    fs.setPermission(src, new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
    final FileStatus srcStatus = fs.getFileStatus(src);
    final long mtime = srcStatus.getModificationTime();
    final long atime = srcStatus.getAccessTime();
    final String owner = srcStatus.getOwner();
    final String group = srcStatus.getGroup();
    final FsPermission perm = srcStatus.getPermission();
    shell = new FsShell(conf);
    // "-cp -p": everything except ACL entries should carry over.
    final Path target1 = new Path(hdfsTestDir, "targetfile1");
    final String[] cpArgs = new String[]{"-cp", "-p", src.toUri().toString(), target1.toUri().toString()};
    assertEquals("cp is not working", SUCCESS, ToolRunner.run(shell, cpArgs));
    FileStatus stat = fs.getFileStatus(target1);
    assertEquals(mtime, stat.getModificationTime());
    assertEquals(atime, stat.getAccessTime());
    assertEquals(owner, stat.getOwner());
    assertEquals(group, stat.getGroup());
    FsPermission actualPerm = stat.getPermission();
    assertTrue(perm.equals(actualPerm));
    List entries = fs.getAclStatus(target1).getEntries();
    assertTrue(entries.isEmpty());
    assertFalse(actualPerm.getAclBit());
    // "-cp -ptopa": ACL entries should carry over as well.
    final Path target2 = new Path(hdfsTestDir, "targetfile2");
    final String[] cpAclArgs = new String[]{"-cp", "-ptopa", src.toUri().toString(), target2.toUri().toString()};
    assertEquals("cp -ptopa is not working", SUCCESS, ToolRunner.run(shell, cpAclArgs));
    stat = fs.getFileStatus(target2);
    assertEquals(mtime, stat.getModificationTime());
    assertEquals(atime, stat.getAccessTime());
    assertEquals(owner, stat.getOwner());
    assertEquals(group, stat.getGroup());
    actualPerm = stat.getPermission();
    assertTrue(perm.equals(actualPerm));
    entries = fs.getAclStatus(target2).getEntries();
    assertFalse(entries.isEmpty());
    assertTrue(actualPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Error cases for "-getfattr": an unprivileged user must not see an xattr
 * value on a 0700 path, and asking for a nonexistent attribute must print an
 * "attributes provided was not found" error.
 */
@Test(timeout=120000) public void testGetFAttrErrors() throws Exception {
final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
MiniDFSCluster cluster=null;
PrintStream bakErr=null;
try {
final Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FileSystem fs=cluster.getFileSystem();
final Path p=new Path("/foo");
fs.mkdirs(p);
// Capture stderr so the shell's error output can be asserted on.
bakErr=System.err;
final FsShell fshell=new FsShell(conf);
final ByteArrayOutputStream out=new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// Owner-only access: "user" must not be able to read xattrs on /foo.
fs.setPermission(p,new FsPermission((short)0700));
{
// As the test runner (superuser): set the xattr successfully.
final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
}
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
// The exit code is not asserted here; only that the value never leaks
// into the captured output.
int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"});
String str=out.toString();
assertTrue("xattr value was incorrectly returned",str.indexOf("1234") == -1);
out.reset();
return null;
}
}
);
{
// Requesting an attribute that was never set must produce the
// "not found" getfattr error message.
final int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.nonexistent","/foo"});
String str=out.toString();
assertTrue("xattr value was incorrectly returned",str.indexOf("getfattr: At least one of the attributes provided was not found") >= 0);
out.reset();
}
}
finally {
// Restore stderr and tear the cluster down regardless of outcome.
if (bakErr != null) {
System.setErr(bakErr);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the "-cp" preserve flags on a FILE copy: -p and -ptop keep
 * timestamps/ownership/permission only; adding 'x' also preserves xattrs;
 * adding 'a' also preserves ACL entries.
 */
@Test(timeout=120000) public void testCopyCommandsWithPreserveOption() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell = null;
  FileSystem fs = null;
  final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-" + counter.getAndIncrement();
  final Path hdfsTestDir = new Path(testdir);
  try {
    fs = cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    Path src = new Path(hdfsTestDir, "srcfile");
    fs.create(src).close();
    fs.setAcl(src, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
    FileStatus status = fs.getFileStatus(src);
    final long mtime = status.getModificationTime();
    final long atime = status.getAccessTime();
    final String owner = status.getOwner();
    final String group = status.getGroup();
    final FsPermission perm = status.getPermission();
    fs.setXAttr(src, USER_A1, USER_A1_VALUE);
    fs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);
    shell = new FsShell(conf);
    // -p: timestamps/ownership/permission preserved; no xattrs, no ACLs.
    Path target1 = new Path(hdfsTestDir, "targetfile1");
    String[] argv = new String[]{"-cp", "-p", src.toUri().toString(), target1.toUri().toString()};
    int ret = ToolRunner.run(shell, argv);
    assertEquals("cp -p is not working", SUCCESS, ret);
    FsPermission targetPerm = checkPreservedFileStatus(fs, target1, mtime, atime, owner, group, perm);
    Map xattrs = fs.getXAttrs(target1);
    assertTrue(xattrs.isEmpty());
    List acls = fs.getAclStatus(target1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptop: same preservation set, spelled out explicitly.
    Path target2 = new Path(hdfsTestDir, "targetfile2");
    argv = new String[]{"-cp", "-ptop", src.toUri().toString(), target2.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptop is not working", SUCCESS, ret);
    targetPerm = checkPreservedFileStatus(fs, target2, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(target2);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopx: xattrs are preserved as well.
    Path target3 = new Path(hdfsTestDir, "targetfile3");
    argv = new String[]{"-cp", "-ptopx", src.toUri().toString(), target3.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopx is not working", SUCCESS, ret);
    targetPerm = checkPreservedFileStatus(fs, target3, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(target3);
    assertEquals(xattrs.size(), 2);
    assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
    acls = fs.getAclStatus(target3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopa: ACL entries are preserved as well.
    Path target4 = new Path(hdfsTestDir, "targetfile4");
    argv = new String[]{"-cp", "-ptopa", src.toUri().toString(), target4.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptopa is not working", SUCCESS, ret);
    targetPerm = checkPreservedFileStatus(fs, target4, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(target4);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));
    // -ptoa: ACL preservation without the explicit permission letter.
    Path target5 = new Path(hdfsTestDir, "targetfile5");
    argv = new String[]{"-cp", "-ptoa", src.toUri().toString(), target5.toUri().toString()};
    ret = ToolRunner.run(shell, argv);
    assertEquals("cp -ptoa is not working", SUCCESS, ret);
    targetPerm = checkPreservedFileStatus(fs, target5, mtime, atime, owner, group, perm);
    xattrs = fs.getXAttrs(target5);
    assertTrue(xattrs.isEmpty());
    acls = fs.getAclStatus(target5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir, true);
      fs.close();
    }
    cluster.shutdown();
  }
}

/**
 * Asserts that mtime/atime/owner/group/permission of {@code target} equal the
 * captured source values; returns the target's permission for further checks.
 * (Extracted from five identical inline sequences in the test above.)
 */
private static FsPermission checkPreservedFileStatus(FileSystem fs, Path target, long mtime, long atime, String owner, String group, FsPermission perm) throws Exception {
  FileStatus targetStatus = fs.getFileStatus(target);
  assertEquals(mtime, targetStatus.getModificationTime());
  assertEquals(atime, targetStatus.getAccessTime());
  assertEquals(owner, targetStatus.getOwner());
  assertEquals(group, targetStatus.getGroup());
  FsPermission targetPerm = targetStatus.getPermission();
  assertTrue(perm.equals(targetPerm));
  return targetPerm;
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* This test ensures the appropriate response (successful or failure) from
* a Datanode when the system is started with differing version combinations.
*
* For each 3-tuple in the cross product
* ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
* {currentNamespaceId,incorrectNamespaceId},
* {pastFsscTime,currentFsscTime,futureFsscTime})
* 1. Startup Namenode with version file containing
* (currentLayoutVersion,currentNamespaceId,currentFsscTime)
* 2. Attempt to startup Datanode with version file containing
* this iterations version 3-tuple
 */
UtilityVerifierExceptionVerifierHybridVerifier
@Test(expected=IOException.class) public void testUpgradeFromPreUpgradeLVFails() throws IOException {
Storage.checkVersionUpgradable(Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION + 1);
fail("Expected IOException is not thrown");
}
IterativeVerifierUtilityVerifierEqualityVerifierHybridVerifier
/**
* This test attempts to upgrade the NameNode and DataNode under
* a number of valid and invalid conditions.
*/
@Test(timeout=60000) public void testUpgrade() throws Exception {
File[] baseDirs;
StorageInfo storageInfo=null;
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs=conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
log("Normal NameNode upgrade",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=createCluster();
try {
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
fail();
}
catch ( RemoteException re) {
assertEquals(InconsistentFSStateException.class.getName(),re.getClassName());
LOG.info("The exception is expected.",re);
}
checkNameNode(nameNodeDirs,EXPECTED_TXID);
if (numDirs > 1) TestParallelImageWrite.checkImages(cluster.getNamesystem(),numDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("Normal DataNode upgrade",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=createCluster();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
checkDataNode(dataNodeDirs,UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode upgrade with existing previous dir",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("DataNode upgrade with existing previous dir",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=createCluster();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
checkDataNode(dataNodeDirs,UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("DataNode upgrade with future stored layout version in current",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=createCluster();
baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.REGULAR,UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("DataNode upgrade with newer fsscTime in current",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=createCluster();
baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),Long.MAX_VALUE,NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.REGULAR,UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode upgrade with no edits file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
deleteStorageFilesWithPrefix(nameNodeDirs,"edits_");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with no image file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
deleteStorageFilesWithPrefix(nameNodeDirs,"fsimage_");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with corrupt version file",numDirs);
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
for ( File f : baseDirs) {
UpgradeUtilities.corruptFile(new File(f,"VERSION"),"layoutVersion".getBytes(Charsets.UTF_8),"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with old layout version in current",numDirs);
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
storageInfo=new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with future layout version in current",numDirs);
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
int numDirs=4;
{
conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
log("Normal NameNode upgrade",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=createCluster();
try {
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
fail();
}
catch ( RemoteException re) {
assertEquals(InconsistentFSStateException.class.getName(),re.getClassName());
LOG.info("The exception is expected.",re);
}
checkNameNode(nameNodeDirs,EXPECTED_TXID);
TestParallelImageWrite.checkImages(cluster.getNamesystem(),numDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
}
BooleanVerifierIgnoredMethodHybridVerifier
@Ignore public void test203LayoutVersion(){
for ( int lv : Storage.LAYOUT_VERSIONS_203) {
assertTrue(Storage.is203LayoutVersion(lv));
}
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test upgrade from 2.0 image with a variety of .snapshot and .reserved
* paths to test renaming on upgrade
*/
@Test public void testUpgradeFromRel2ReservedImage() throws Exception {
unpackStorage(HADOOP2_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
try {
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
}
catch ( IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("reserved path component in this version",e);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
try {
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/edits","/edits/.reserved","/edits/.user-snapshot","/edits/.user-snapshot/editsdir","/edits/.user-snapshot/editsdir/editscontents","/edits/.user-snapshot/editsdir/editsdir2","/image","/image/.reserved","/image/.user-snapshot","/image/.user-snapshot/imagedir","/image/.user-snapshot/imagedir/imagecontents","/image/.user-snapshot/imagedir/imagedir2","/.my-reserved","/.my-reserved/edits-touch","/.my-reserved/image-touch"};
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
ArrayList toList=new ArrayList();
toList.add(new Path("/"));
ArrayList found=new ArrayList();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
assertEquals("Found an unexpected path while listing filesystem",found.size(),expected.length);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test upgrade from a branch-1.2 image with reserved paths
*/
@Test public void testUpgradeFromRel1ReservedImage() throws Exception {
unpackStorage(HADOOP1_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
try {
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/.my-reserved","/.user-snapshot","/.user-snapshot/.user-snapshot","/.user-snapshot/open","/dir1","/dir1/.user-snapshot","/dir2","/dir2/.user-snapshot","/user","/user/andrew","/user/andrew/.user-snapshot"};
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
ArrayList toList=new ArrayList();
toList.add(new Path("/"));
ArrayList found=new ArrayList();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
assertEquals("Found an unexpected path while listing filesystem",found.size(),expected.length);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
* Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
* NN, verifying that the correct error message is thrown.
*/
@Test public void testFailOnPreUpgradeImage() throws IOException {
Configuration conf=new HdfsConfiguration();
File namenodeStorage=new File(TEST_ROOT_DIR,"nnimage-0.3.0");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,namenodeStorage.toString());
FileUtil.fullyDelete(namenodeStorage);
assertTrue("Make " + namenodeStorage,namenodeStorage.mkdirs());
File imageDir=new File(namenodeStorage,"image");
assertTrue("Make " + imageDir,imageDir.mkdirs());
File imageFile=new File(imageDir,"fsimage");
byte[] imageBytes=StringUtils.hexStringToByte("fffffffee17c0d2700000000");
FileOutputStream fos=new FileOutputStream(imageFile);
try {
fos.write(imageBytes);
}
finally {
fos.close();
}
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.REGULAR).build();
fail("Was able to start NN from 0.3.0 image");
}
catch ( IOException ioe) {
if (!ioe.toString().contains("Old layout version is 'too old'")) {
throw ioe;
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test upgrade from a 0.23.11 image with reserved paths
*/
@Test public void testUpgradeFromRel023ReservedImage() throws Exception {
unpackStorage(HADOOP023_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
try {
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/.user-snapshot","/dir1","/dir1/.user-snapshot","/dir2","/dir2/.user-snapshot"};
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
ArrayList toList=new ArrayList();
toList.add(new Path("/"));
ArrayList found=new ArrayList();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
assertEquals("Found an unexpected path while listing filesystem",found.size(),expected.length);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
* Test upgrade from 0.22 image with corrupt md5, make sure it
* fails to upgrade
*/
@Test public void testUpgradeFromCorruptRel22Image() throws IOException {
unpackStorage(HADOOP22_IMAGE,HADOOP_DFS_DIR_TXT);
File baseDir=new File(MiniDFSCluster.getBaseDirectory());
FSImageTestUtil.corruptVersionFile(new File(baseDir,"name1/current/VERSION"),"imageMD5Digest","22222222222222222222222222222222");
FSImageTestUtil.corruptVersionFile(new File(baseDir,"name2/current/VERSION"),"imageMD5Digest","22222222222222222222222222222222");
final LogVerificationAppender appender=new LogVerificationAppender();
final Logger logger=Logger.getRootLogger();
logger.addAppender(appender);
try {
upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).numDataNodes(4));
fail("Upgrade did not fail with bad MD5");
}
catch ( IOException ioe) {
String msg=StringUtils.stringifyException(ioe);
if (!msg.contains("Failed to load an FSImage file")) {
throw ioe;
}
int md5failures=appender.countExceptionsWithMessage(" is corrupt with MD5 checksum of ");
assertEquals("Upgrade did not fail with bad MD5",1,md5failures);
}
}
Class: org.apache.hadoop.hdfs.TestDFSUtil
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Ensure that fs.defaultFS is set in the configuration even if neither HA nor
* Federation is enabled.
* Regression test for HDFS-3351.
*/
@Test public void testConfModificationNoFederationOrHa(){
final HdfsConfiguration conf=new HdfsConfiguration();
String nsId=null;
String nnId=null;
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,"localhost:1234");
assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
NameNode.initializeGenericKeys(conf,nsId,nnId);
assertEquals("hdfs://localhost:1234",conf.get(FS_DEFAULT_NAME_KEY));
}
UtilityVerifierExceptionVerifierHybridVerifier
/**
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
* exception is thrown when multiple rpc addresses match the local node's
* address
*/
@Test(expected=HadoopIllegalArgumentException.class) public void testGetNameServiceIdException(){
HdfsConfiguration conf=new HdfsConfiguration();
conf.set(DFS_NAMESERVICES,"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"nn1"),"localhost:9000");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"nn2"),"localhost:9001");
DFSUtil.getNamenodeNameServiceId(conf);
fail("Expected exception is not thrown");
}
APIUtilityVerifierIterativeVerifierBranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testDuplicateScans() throws Exception {
long startTime=Time.monotonicNow();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
FileSystem fs=null;
try {
fs=cluster.getFileSystem();
DataNode dataNode=cluster.getDataNodes().get(0);
int infoPort=dataNode.getInfoPort();
long scanTimeBefore=0, scanTimeAfter=0;
for (int i=1; i < 10; i++) {
Path fileName=new Path("/test" + i);
DFSTestUtil.createFile(fs,fileName,1024,(short)1,1000L);
waitForVerification(infoPort,fs,fileName,i,startTime,TIMEOUT);
if (i > 1) {
scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (i - 1))));
assertFalse("scan time shoud not be 0",scanTimeAfter == 0);
assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter);
}
scanTimeBefore=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + i)));
}
cluster.restartDataNode(0);
Thread.sleep(10000);
dataNode=cluster.getDataNodes().get(0);
scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (9))));
assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter);
}
finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestDatanodeConfig
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierHybridVerifier
/**
* Test that a data-node does not start if configuration specifies
* incorrect URI scheme in data directory.
* Test that a data-node starts if data directory is specified as
* URI = "file:///path" or as a non URI path.
*/
@Test public void testDataDirectories() throws IOException {
File dataDir=new File(BASE_DIR,"data").getCanonicalFile();
Configuration conf=cluster.getConfiguration(0);
String dnDir=makeURI("shv",null,fileAsURI(dataDir).getPath());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir);
DataNode dn=null;
try {
dn=DataNode.createDataNode(new String[]{},conf);
fail();
}
catch ( Exception e) {
}
finally {
if (dn != null) {
dn.shutdown();
}
}
assertNull("Data-node startup should have failed.",dn);
String dnDir1=fileAsURI(dataDir).toString() + "1";
String dnDir2=makeURI("file","localhost",fileAsURI(dataDir).getPath() + "2");
String dnDir3=dataDir.getAbsolutePath() + "3";
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir1 + "," + dnDir2+ ","+ dnDir3);
try {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertTrue("Data-node should startup.",cluster.isDataNodeUp());
}
finally {
if (cluster != null) {
cluster.shutdownDataNodes();
}
}
}
BranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
* Regression test for HDFS-894 ensures that, when datanodes
* are restarted, the new IPC port is registered with the
* namenode.
*/
@Test public void testChangeIpcPort() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
cluster.restartDataNodes();
DatanodeInfo[] report=client.datanodeReport(DatanodeReportType.ALL);
long firstUpdateAfterRestart=report[0].getLastUpdate();
boolean gotHeartbeat=false;
for (int i=0; i < 10 && !gotHeartbeat; i++) {
try {
Thread.sleep(i * 1000);
}
catch ( InterruptedException ie) {
}
report=client.datanodeReport(DatanodeReportType.ALL);
gotHeartbeat=(report[0].getLastUpdate() > firstUpdateAfterRestart);
}
if (!gotHeartbeat) {
fail("Never got a heartbeat from restarted datanode.");
}
int realIpcPort=cluster.getDataNodes().get(0).getIpcPort();
assertEquals(realIpcPort,report[0].getIpcPort());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
BooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
* Ensure the datanode manager does not do host lookup after registration,
* especially for node reports.
* @throws Exception
*/
@Test public void testDNSLookups() throws Exception {
MonitorDNS sm=new MonitorDNS();
System.setSecurityManager(sm);
MiniDFSCluster cluster=null;
try {
HdfsConfiguration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
cluster.waitActive();
int initialLookups=sm.lookups;
assertTrue("dns security manager is active",initialLookups != 0);
DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
dm.refreshNodes(conf);
assertEquals(initialLookups,sm.lookups);
dm.refreshNodes(conf);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.ALL);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.LIVE);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.DEAD);
assertEquals(initialLookups,sm.lookups);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
System.setSecurityManager(null);
}
}
Class: org.apache.hadoop.hdfs.TestDecommission
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Tests decommission with replicas on the target datanode cannot be migrated
* to other datanodes and satisfy the replication factor. Make sure the
* datanode won't get stuck in decommissioning state.
*/
@Test(timeout=360000) public void testDecommission2() throws IOException {
LOG.info("Starting test testDecommission");
int numNamenodes=1;
int numDatanodes=4;
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
startCluster(numNamenodes,numDatanodes,conf);
ArrayList> namenodeDecomList=new ArrayList>(numNamenodes);
namenodeDecomList.add(0,new ArrayList(numDatanodes));
Path file1=new Path("testDecommission2.dat");
int replicas=4;
ArrayList decommissionedNodes=namenodeDecomList.get(0);
FileSystem fileSys=cluster.getFileSystem(0);
FSNamesystem ns=cluster.getNamesystem(0);
writeFile(fileSys,file1,replicas);
int deadDecomissioned=ns.getNumDecomDeadDataNodes();
int liveDecomissioned=ns.getNumDecomLiveDataNodes();
DatanodeInfo decomNode=decommissionNode(0,null,decommissionedNodes,AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
assertEquals(deadDecomissioned,ns.getNumDecomDeadDataNodes());
assertEquals(liveDecomissioned + 1,ns.getNumDecomLiveDataNodes());
DFSClient client=getDfsClient(cluster.getNameNode(0),conf);
assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length);
assertNull(checkFile(fileSys,file1,replicas,decomNode.getXferAddr(),numDatanodes));
cleanupFile(fileSys,file1);
cluster.shutdown();
startCluster(1,4,conf);
cluster.shutdown();
}
APIUtilityVerifierIterativeVerifierBranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test using a "registration name" in a host include file.
* Registration names are DataNode names specified in the configuration by
* dfs.datanode.hostname. The DataNode will send this name to the NameNode
* as part of its registration. Registration names are helpful when you
* want to override the normal first result of DNS resolution on the
* NameNode. For example, a given datanode IP may map to two hostnames,
* and you may want to choose which hostname is used internally in the
* cluster.
* It is not recommended to use a registration name which is not also a
* valid DNS hostname for the DataNode. See HDFS-5237 for background.
*/
@Test(timeout=360000) public void testIncludeByRegistrationName() throws IOException, InterruptedException {
Configuration hdfsConf=new Configuration(conf);
final String registrationName="127.0.0.100";
final String nonExistentDn="127.0.0.10";
hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY,registrationName);
cluster=new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).checkDataNodeHostConfig(true).setupHostsFile(true).build();
cluster.waitActive();
ArrayList nodes=new ArrayList();
nodes.add(nonExistentDn);
writeConfigFile(hostsFile,nodes);
refreshNodes(cluster.getNamesystem(0),hdfsConf);
DFSClient client=getDfsClient(cluster.getNameNode(0),hdfsConf);
while (true) {
DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.DEAD);
if (info.length == 1) {
break;
}
LOG.info("Waiting for datanode to be marked dead");
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
}
int dnPort=cluster.getDataNodes().get(0).getXferPort();
nodes=new ArrayList();
nodes.add(registrationName + ":" + dnPort);
writeConfigFile(hostsFile,nodes);
refreshNodes(cluster.getNamesystem(0),hdfsConf);
cluster.restartDataNode(0);
while (true) {
DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.LIVE);
if (info.length == 1) {
Assert.assertFalse(info[0].isDecommissioned());
Assert.assertFalse(info[0].isDecommissionInProgress());
assertEquals(registrationName,info[0].getHostName());
break;
}
LOG.info("Waiting for datanode to come back");
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Tests restart of namenode while datanode hosts are added to exclude file
*/
@Test(timeout=360000) public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
LOG.info("Starting test testDecommissionWithNamenodeRestart");
int numNamenodes=1;
int numDatanodes=1;
int replicas=1;
startCluster(numNamenodes,numDatanodes,conf);
Path file1=new Path("testDecommission.dat");
FileSystem fileSys=cluster.getFileSystem();
writeFile(fileSys,file1,replicas);
DFSClient client=getDfsClient(cluster.getNameNode(),conf);
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
DatanodeID excludedDatanodeID=info[0];
String excludedDatanodeName=info[0].getXferAddr();
writeConfigFile(excludeFile,new ArrayList(Arrays.asList(excludedDatanodeName)));
cluster.startDataNodes(conf,1,true,null,null,null,null);
numDatanodes+=1;
assertEquals("Number of datanodes should be 2 ",2,cluster.getDataNodes().size());
cluster.restartNameNode();
DatanodeInfo datanodeInfo=NameNodeAdapter.getDatanode(cluster.getNamesystem(),excludedDatanodeID);
waitNodeState(datanodeInfo,AdminStates.DECOMMISSIONED);
assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length);
int tries=0;
while (tries++ < 20) {
try {
Thread.sleep(1000);
if (checkFile(fileSys,file1,replicas,datanodeInfo.getXferAddr(),numDatanodes) == null) {
break;
}
}
catch ( InterruptedException ie) {
}
}
assertTrue("Checked if block was replicated after decommission, tried " + tries + " times.",tries < 20);
cleanupFile(fileSys,file1);
cluster.shutdown();
startCluster(numNamenodes,numDatanodes,conf);
cluster.shutdown();
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Tests error paths for{@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
*/
@Test(timeout=60000) public void testGetFileBlockStorageLocationsError() throws Exception {
final Configuration conf=getTestConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
conf.setInt(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,1500);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.getDataNodes();
final DistributedFileSystem fs=cluster.getFileSystem();
final Path tmpFile1=new Path("/errorfile1.dat");
final Path tmpFile2=new Path("/errorfile2.dat");
DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADl);
DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADl);
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
try {
List list=Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
int totalRepl=0;
for ( BlockLocation loc : list) {
totalRepl+=loc.getHosts().length;
}
if (totalRepl == 4) {
return true;
}
}
catch ( IOException e) {
}
return false;
}
}
,500,30000);
BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
List allLocs=Lists.newArrayList();
allLocs.addAll(Arrays.asList(blockLocs1));
allLocs.addAll(Arrays.asList(blockLocs2));
DataNodeFaultInjector injector=Mockito.mock(DataNodeFaultInjector.class);
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(3000);
return null;
}
}
).when(injector).getHdfsBlocksMetadata();
DataNodeFaultInjector.instance=injector;
BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(allLocs);
for ( BlockStorageLocation loc : locs) {
assertEquals("Found more than 0 cached hosts although RPCs supposedly timed out",0,loc.getCachedHosts().length);
}
DataNodeFaultInjector.instance=new DataNodeFaultInjector();
DataNodeProperties stoppedNode=cluster.stopDataNode(0);
locs=fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocation for two 1-block files",2,locs.length);
for ( BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block",2,l.getHosts().length);
assertEquals("Expected two VolumeIDs for each block",2,l.getVolumeIds().length);
assertTrue("Expected one valid and one invalid volume",(l.getVolumeIds()[0] == null) ^ (l.getVolumeIds()[1] == null));
}
cluster.restartDataNode(stoppedNode,true);
cluster.waitActive();
fs.delete(tmpFile2,true);
HATestUtil.waitForNNToIssueDeletions(cluster.getNameNode());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
locs=fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
assertNotNull(locs[0].getVolumeIds()[0]);
assertNotNull(locs[0].getVolumeIds()[1]);
assertNull(locs[1].getVolumeIds()[0]);
assertNull(locs[1].getVolumeIds()[1]);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises the DFSClient lease renewer lifecycle: the renewer daemon must
 * run exactly while at least one output stream is open, and must stop within
 * the grace period after the last stream is closed. Also verifies that
 * opening a non-existent file throws FileNotFoundException, that reads do
 * not start the renewer, and that a filesystem addressed by an explicit
 * IP-based URI works end-to-end.
 */
@Test
public void testDFSClient() throws Exception {
  Configuration conf = getTestConfiguration();
  final long grace = 1000L; // renewer grace sleep period in ms
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final String filepathstring = "/test/LeaseChecker/foo";
    final Path[] filepaths = new Path[4];
    for (int i = 0; i < filepaths.length; i++) {
      filepaths[i] = new Path(filepathstring + i);
    }
    final long millis = Time.now();

    {
      final DistributedFileSystem dfs = cluster.getFileSystem();
      dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());

      {
        // A single stream: the renewer starts with the stream and stops
        // within the grace period after close.
        final FSDataOutputStream out = dfs.create(filepaths[0]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out.close();
        Thread.sleep(grace / 4 * 3);
        // Still inside the grace period: the renewer has not stopped yet.
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        // Poll a few times for the renewer to shut down.
        for (int i = 0; i < 3; i++) {
          if (dfs.dfs.getLeaseRenewer().isRunning()) {
            Thread.sleep(grace / 2);
          }
        }
        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      }

      {
        // Two overlapping streams: closing only one keeps the renewer alive.
        final FSDataOutputStream out1 = dfs.create(filepaths[1]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        final FSDataOutputStream out2 = dfs.create(filepaths[2]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out1.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out1.close();
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out2.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out2.close();
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
      }

      {
        // A third stream opened before the previous grace expires keeps the
        // renewer running for the whole write, then it stops after close.
        final FSDataOutputStream out3 = dfs.create(filepaths[3]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out3.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out3.close();
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        for (int i = 0; i < 3; i++) {
          if (dfs.dfs.getLeaseRenewer().isRunning()) {
            Thread.sleep(grace / 2);
          }
        }
        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      }
      dfs.close();
    }

    {
      // Opening a non-existent path must throw FileNotFoundException.
      FileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/wrwelkj");
      assertFalse("File should not exist for test.", fs.exists(dir));
      try {
        FSDataInputStream in = fs.open(dir);
        in.close();
        fs.close();
        // Reaching here means open() succeeded. Fail explicitly instead of
        // the original finally { assertTrue(..., false) }, which fired from
        // a finally block and masked any exception thrown by close().
        fail("Did not get a FileNotFoundException for non-existing" + " file.");
      } catch (FileNotFoundException fnf) {
        // expected
      }
    }

    {
      // Reads must not start the lease renewer.
      final DistributedFileSystem dfs = cluster.getFileSystem();
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      FSDataInputStream in = dfs.open(filepaths[0]);
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      assertEquals(millis, in.readLong());
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      in.close();
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      dfs.close();
    }

    {
      // Round-trip a write and read through a filesystem reached via an
      // explicit IP-based URI.
      String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort()
          + "/test/ipAddress/file";
      Path path = new Path(uri);
      FileSystem fs = FileSystem.get(path.toUri(), conf);
      FSDataOutputStream out = fs.create(path);
      byte[] buf = new byte[1024];
      out.write(buf);
      out.close();
      FSDataInputStream in = fs.open(path);
      in.readFully(buf);
      in.close();
      fs.close();
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests the normal path of batching up BlockLocation[]s to be passed to a
 * single
 * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
 * call
 */
@Test(timeout = 60000)
public void testGetFileBlockStorageLocationsBatching() throws Exception {
  final Configuration conf = getTestConfiguration();
  ((Log4JLogger) ProtobufRpcEngine.LOG).getLogger().setLevel(Level.TRACE);
  ((Log4JLogger) BlockStorageLocationUtil.LOG).getLogger().setLevel(Level.TRACE);
  ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.TRACE);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    // Create two 1-block files, each replicated on both datanodes.
    // (Upper-case 'L' suffix: the original lower-case 'l' reads like a 1.)
    final Path tmpFile1 = new Path("/tmpfile1.dat");
    final Path tmpFile2 = new Path("/tmpfile2.dat");
    DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADL);
    DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADL);
    // Wait until all 4 replicas (2 files x replication 2) are reported.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        try {
          List<BlockLocation> list = Lists.newArrayList();
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1, 0, 1024)));
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2, 0, 1024)));
          int totalRepl = 0;
          for (BlockLocation loc : list) {
            totalRepl += loc.getHosts().length;
          }
          if (totalRepl == 4) {
            return true;
          }
        } catch (IOException e) {
          // ignore and retry until the waitFor timeout expires
        }
        return false;
      }
    }, 500, 30000);
    // Batch both files' locations into a single storage-locations call.
    BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
    BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
    BlockLocation[] blockLocs =
        (BlockLocation[]) ArrayUtils.addAll(blockLocs1, blockLocs2);
    BlockStorageLocation[] locs =
        fs.getFileBlockStorageLocations(Arrays.asList(blockLocs));
    int counter = 0;
    for (BlockStorageLocation l : locs) {
      for (int i = 0; i < l.getVolumeIds().length; i++) {
        VolumeId id = l.getVolumeIds()[i];
        String name = l.getNames()[i];
        if (id != null) {
          System.out.println("Datanode " + name + " has block " + counter
              + " on volume id " + id.toString());
        }
      }
      counter++;
    }
    // One location per file; every replica should carry a valid volume id.
    assertEquals("Expected two HdfsBlockLocations for two 1-block files", 2,
        locs.length);
    for (BlockStorageLocation l : locs) {
      assertEquals("Expected two replicas for each block", 2,
          l.getVolumeIds().length);
      for (int i = 0; i < l.getVolumeIds().length; i++) {
        VolumeId id = l.getVolumeIds()[i];
        String name = l.getNames()[i];
        assertTrue("Expected block to be valid on datanode " + name,
            id != null);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifierBranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Verifies server-side handling of a client that does not support data
 * transfer encryption: after the cluster is restarted with encryption
 * enabled, a read from such a client should fail (unless a trusted channel
 * resolver is configured) and the datanode should log the failed handshake.
 */
@Test public void testClientThatDoesNotSupportEncryption() throws IOException {
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
// Shrink the client retry window so the expected failure happens quickly.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
cluster=new MiniDFSCluster.Builder(conf).build();
// Phase 1: start WITHOUT encryption and write plaintext test data.
FileSystem fs=getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
fs.close();
cluster.shutdown();
// Phase 2: restart the same cluster (format=false keeps the data) with
// encryption config keys set on the server side.
setEncryptionConfigKeys(conf);
cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
fs=getFileSystem(conf);
// Spy on the DFSClient so it claims NOT to support encrypted transfer.
DFSClient client=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
DFSClient spyClient=Mockito.spy(client);
Mockito.doReturn(false).when(spyClient).shouldEncryptData();
DFSClientAdapter.setDFSClient((DistributedFileSystem)fs,spyClient);
// Capture DataNode logs so the rejected handshake can be asserted below.
LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(DataNode.class));
try {
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
// When no trusted channel resolver is in play, a successful read here
// means the datanode wrongly accepted the unencrypted connection.
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")) {
fail("Should not have been able to read without encryption enabled.");
}
}
catch ( IOException ioe) {
// Expected failure mode: the client cannot obtain the block.
GenericTestUtils.assertExceptionContains("Could not obtain block:",ioe);
}
finally {
logs.stopCapturing();
}
fs.close();
// The datanode must have logged the failed encryption handshake.
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")) {
GenericTestUtils.assertMatches(logs.getOutput(),"Failed to read expected encryption handshake from client at");
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Class: org.apache.hadoop.hdfs.TestEncryptionZones
APIUtilityVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test getEncryptionZoneForPath as a non super user.
 */
@Test(timeout = 60000)
public void testGetEZAsNonSuperUser() throws Exception {
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
  // Layout: a 0700 EZ (superuser only), a 0777 EZ, and a non-EZ directory,
  // each containing one file.
  final Path testRoot = new Path(fsHelper.getTestRootDir());
  final Path superPath = new Path(testRoot, "superuseronly");
  final Path superPathFile = new Path(superPath, "file1");
  final Path allPath = new Path(testRoot, "accessall");
  final Path allPathFile = new Path(allPath, "file1");
  final Path nonEZDir = new Path(testRoot, "nonEZDir");
  final Path nonEZFile = new Path(nonEZDir, "file1");
  final int len = 8192;
  fsWrapper.mkdir(testRoot, new FsPermission((short) 0777), true);
  fsWrapper.mkdir(superPath, new FsPermission((short) 0700), false);
  fsWrapper.mkdir(allPath, new FsPermission((short) 0777), false);
  fsWrapper.mkdir(nonEZDir, new FsPermission((short) 0777), false);
  dfsAdmin.createEncryptionZone(superPath, TEST_KEY);
  dfsAdmin.createEncryptionZone(allPath, TEST_KEY);
  // Snapshot the root so EZ resolution through a snapshot path can be
  // tested even after the live directories are deleted.
  dfsAdmin.allowSnapshot(new Path("/"));
  final Path newSnap = fs.createSnapshot(new Path("/"));
  DFSTestUtil.createFile(fs, superPathFile, len, (short) 1, 0xFEED);
  DFSTestUtil.createFile(fs, allPathFile, len, (short) 1, 0xFEED);
  DFSTestUtil.createFile(fs, nonEZFile, len, (short) 1, 0xFEED);
  // Parameterize the action: the raw PrivilegedExceptionAction forced an
  // unchecked call to doAs.
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final HdfsAdmin userAdmin =
          new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
      try {
        userAdmin.getEncryptionZoneForPath(null);
        fail("should have thrown NPE");
      } catch (NullPointerException e) {
        // expected: a null path is rejected up front
      }
      assertEquals("expected ez path", allPath.toString(),
          userAdmin.getEncryptionZoneForPath(allPath).getPath().toString());
      assertEquals("expected ez path", allPath.toString(),
          userAdmin.getEncryptionZoneForPath(allPathFile).getPath().toString());
      // The 0700 zone must be inaccessible to this user.
      try {
        userAdmin.getEncryptionZoneForPath(superPathFile);
        fail("expected AccessControlException");
      } catch (AccessControlException e) {
        assertExceptionContains("Permission denied:", e);
      }
      assertNull("expected null for non-ez path",
          userAdmin.getEncryptionZoneForPath(nonEZDir));
      assertNull("expected null for non-ez path",
          userAdmin.getEncryptionZoneForPath(nonEZFile));
      // EZ resolution must keep working through the snapshot path, even
      // after the live file and then the live directory are deleted.
      String snapshottedAllPath = newSnap.toString() + allPath.toString();
      assertEquals("expected ez path", allPath.toString(),
          userAdmin.getEncryptionZoneForPath(
              new Path(snapshottedAllPath)).getPath().toString());
      fs.delete(allPathFile, false);
      assertEquals("expected ez path", allPath.toString(),
          userAdmin.getEncryptionZoneForPath(
              new Path(snapshottedAllPath)).getPath().toString());
      fs.delete(allPath, true);
      assertEquals("expected ez path", allPath.toString(),
          userAdmin.getEncryptionZoneForPath(
              new Path(snapshottedAllPath)).getPath().toString());
      // The live (deleted) paths no longer resolve to a zone.
      assertNull("expected null for deleted file path",
          userAdmin.getEncryptionZoneForPath(allPathFile));
      assertNull("expected null for deleted directory path",
          userAdmin.getEncryptionZoneForPath(allPath));
      return null;
    }
  });
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Writes and reads files inside an encryption zone, rolls the zone key, and
 * verifies that existing files still decrypt while files written after the
 * roll use a different key version and a distinct EDEK.
 */
@Test(timeout = 120000)
public void testReadWrite() throws Exception {
  final HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final int fileLen = 8192;
  // Unencrypted reference file for content comparison.
  final Path plainFile = new Path("/base");
  DFSTestUtil.createFile(fs, plainFile, fileLen, (short) 1, 0xFEED);
  // Create an encryption zone and a file inside it.
  final Path zoneDir = new Path("/zone");
  fs.mkdirs(zoneDir);
  admin.createEncryptionZone(zoneDir, TEST_KEY);
  final Path zoneFile = new Path(zoneDir, "myfile");
  DFSTestUtil.createFile(fs, zoneFile, fileLen, (short) 1, 0xFEED);
  verifyFilesEqual(fs, plainFile, zoneFile, fileLen);
  assertNumZones(1);
  // Roll the zone's key; the existing file must remain readable.
  final String zoneKeyName = admin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(zoneKeyName);
  verifyFilesEqual(fs, plainFile, zoneFile, fileLen);
  // A file written after the roll must carry the new key version.
  final Path rolledFile = new Path(zoneDir, "myfile2");
  DFSTestUtil.createFile(fs, rolledFile, fileLen, (short) 1, 0xFEED);
  final FileEncryptionInfo infoBefore = getFileEncryptionInfo(zoneFile);
  final FileEncryptionInfo infoAfter = getFileEncryptionInfo(rolledFile);
  assertFalse("EDEKs should be different",
      Arrays.equals(infoBefore.getEncryptedDataEncryptionKey(),
          infoAfter.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      infoBefore.getEzKeyVersionName(), infoAfter.getEzKeyVersionName());
  verifyFilesEqual(fs, zoneFile, rolledFile, fileLen);
}
APIUtilityVerifierIterativeVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Verifies cipher-suite negotiation for files created inside an encryption
 * zone: creation fails when the client advertises no usable suite, and
 * succeeds when AES_CTR_NOPADDING appears anywhere in the advertised list.
 * Also checks that only one key (with one version) was created for the zone.
 */
@Test(timeout = 60000)
public void testCipherSuiteNegotiation() throws Exception {
  final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  // Default client suites: create succeeds.
  DFSTestUtil.createFile(fs, new Path(zone, "success1"), 0, (short) 1, 0xFEED);
  // Empty suite list: create must fail.
  fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(0);
  try {
    DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
    fail("Created a file without specifying a CipherSuite!");
  } catch (UnknownCipherSuiteException e) {
    assertExceptionContains("No cipher suites", e);
  }
  // Only UNKNOWN suites: still no usable suite, create must fail.
  fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  try {
    DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
    fail("Created a file without specifying a CipherSuite!");
  } catch (UnknownCipherSuiteException e) {
    assertExceptionContains("No cipher suites", e);
  }
  // A known suite first in the list: create succeeds.
  fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  DFSTestUtil.createFile(fs, new Path(zone, "success2"), 0, (short) 1, 0xFEED);
  // A known suite last in the list: create still succeeds.
  fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
  DFSTestUtil.createFile(fs, new Path(zone, "success3"), 4096, (short) 1, 0xFEED);
  cluster.getNamesystem().getProvider().flush();
  KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
  // Parameterized lists: the raw List made the String/KeyVersion
  // enhanced-for loops fail to type-check.
  List<String> keys = provider.getKeys();
  assertEquals("Expected NN to have created one key per zone", 1, keys.size());
  List<KeyProvider.KeyVersion> allVersions = Lists.newArrayList();
  for (String key : keys) {
    List<KeyProvider.KeyVersion> versions = provider.getKeyVersions(key);
    assertEquals("Should only have one key version per key", 1, versions.size());
    allVersions.addAll(versions);
  }
  // Both successfully-negotiated files must record the known suite.
  for (int i = 2; i <= 3; i++) {
    FileEncryptionInfo feInfo =
        getFileEncryptionInfo(new Path(zone.toString() + "/success" + i));
    assertEquals(feInfo.getCipherSuite(), CipherSuite.AES_CTR_NOPADDING);
  }
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Tests the retry logic in startFile. We release the lock while generating
 * an EDEK, so tricky things can happen in the intervening time.
 */
@Test(timeout = 120000)
public void testStartFileRetry() throws Exception {
  final Path zone1 = new Path("/zone1");
  final Path file = new Path(zone1, "file1");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  ExecutorService executor = Executors.newSingleThreadExecutor();
  try {
    // Case 1: the directory becomes an encryption zone while the create is
    // in flight -> startFile must retry once (generateCount == 2).
    executor.submit(new InjectFaultTask() {
      @Override
      public void doFault() throws Exception {
        dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
      }
      @Override
      public void doCleanup() throws Exception {
        assertEquals("Expected a startFile retry", 2, injector.generateCount);
        fsWrapper.delete(file, false);
      }
    }).get();
    // Case 2: the zone is deleted mid-create -> no retry needed.
    executor.submit(new InjectFaultTask() {
      @Override
      public void doFault() throws Exception {
        fsWrapper.delete(zone1, true);
      }
      @Override
      public void doCleanup() throws Exception {
        assertEquals("Expected no startFile retries", 1, injector.generateCount);
        fsWrapper.delete(file, false);
      }
    }).get();
    // Case 3: the zone is re-created with a different key mid-create
    // -> one retry to pick up the new key.
    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
    final String otherKey = "otherKey";
    DFSTestUtil.createKey(otherKey, cluster, conf);
    dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
    executor.submit(new InjectFaultTask() {
      @Override
      public void doFault() throws Exception {
        fsWrapper.delete(zone1, true);
        fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
        dfsAdmin.createEncryptionZone(zone1, otherKey);
      }
      @Override
      public void doCleanup() throws Exception {
        assertEquals("Expected a startFile retry", 2, injector.generateCount);
        fsWrapper.delete(zone1, true);
      }
    }).get();
    // Case 4: flip the zone key on every attempt so the create can never
    // catch up; after enough retries the client must give up.
    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
    final String anotherKey = "anotherKey";
    DFSTestUtil.createKey(anotherKey, cluster, conf);
    dfsAdmin.createEncryptionZone(zone1, anotherKey);
    String keyToUse = otherKey;
    MyInjector injector = new MyInjector();
    EncryptionFaultInjector.instance = injector;
    // Fixed mangled generic "Future>": the result is unused, so wildcard it.
    Future<?> future = executor.submit(new CreateFileTask(fsWrapper, file));
    for (int i = 0; i < 10; i++) {
      injector.ready.await();
      fsWrapper.delete(zone1, true);
      fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
      dfsAdmin.createEncryptionZone(zone1, keyToUse);
      // Alternate between the two known key references; identity comparison
      // is safe because keyToUse only ever holds these two references.
      if (keyToUse == otherKey) {
        keyToUse = anotherKey;
      } else {
        keyToUse = otherKey;
      }
      injector.wait.countDown();
      injector = new MyInjector();
      EncryptionFaultInjector.instance = injector;
    }
    try {
      future.get();
      fail("Expected exception from too many retries");
    } catch (ExecutionException e) {
      assertExceptionContains(
          "Too many retries because of encryption zone operations",
          e.getCause());
    }
  } finally {
    // The original leaked this single-thread executor's worker thread.
    executor.shutdownNow();
  }
}
Class: org.apache.hadoop.hdfs.TestFileAppend
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test two consecutive appends on a file with a full block.
 */
@Test
public void testAppendTwice() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs1 = cluster.getFileSystem();
  final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
  try {
    final Path file = new Path("/testAppendTwice/foo");
    final int size = 1 << 16;
    final byte[] contents = AppendTestUtil.initBuffer(size);

    // Fill the file so it ends exactly on a block boundary
    // (block size == size).
    FSDataOutputStream out = fs2.create(file, true, 4096, (short) 1, size);
    out.write(contents, 0, size);
    out.close();

    // The first append takes the lease; the second, from a different user,
    // must be rejected.
    fs2.append(file);
    fs1.append(file);
    Assert.fail();
  } catch (RemoteException re) {
    AppendTestUtil.LOG.info("Got an exception:", re);
    Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
        re.getClassName());
  } finally {
    fs2.close();
    fs1.close();
    cluster.shutdown();
  }
}
Class: org.apache.hadoop.hdfs.TestFileAppend2
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Creates one file, writes a few bytes to it and then closed it.
 * Reopens the same file for appending, write all blocks and then close.
 * Verify that all data exists in file.
 * @throws IOException an exception might be thrown
 */
@Test
public void testSimpleAppend() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    {
      // Section 1: write the file in three sessions (create + two appends),
      // then verify the complete contents.
      Path file1 = new Path("/simpleAppend.dat");
      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
      System.out.println("Created file simpleAppend.dat");
      int mid = 186;
      System.out.println("Writing " + mid + " bytes to file " + file1);
      stm.write(fileContents, 0, mid);
      stm.close();
      System.out.println("Wrote and Closed first part of file.");
      int mid2 = 607;
      // Bug fix: the original message logged 'mid' although this append
      // writes (mid2 - mid) bytes.
      System.out.println("Writing " + (mid2 - mid) + " bytes to file " + file1);
      stm = fs.append(file1);
      stm.write(fileContents, mid, mid2 - mid);
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      stm = fs.append(file1);
      // Appending must resume at the current end of the file.
      assertTrue(stm.getPos() > 0);
      System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2)
          + " bytes to file " + file1);
      stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
      System.out.println("Written second part of file");
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE,
          fileContents, "Read 2");
    }
    {
      // Section 2: appending to a non-existent file must throw
      // FileNotFoundException.
      FSDataOutputStream out = null;
      try {
        out = fs.append(new Path("/non-existing.dat"));
        fail("Expected to have FileNotFoundException");
      } catch (java.io.FileNotFoundException fnfe) {
        System.out.println("Good: got " + fnfe);
        fnfe.printStackTrace(System.out);
      } finally {
        IOUtils.closeStream(out);
      }
    }
    {
      // Section 3: append permission checks as a non-superuser.
      Path root = new Path("/");
      fs.setPermission(root, new FsPermission((short) 0777));
      fs.close();
      final UserGroupInformation superuser =
          UserGroupInformation.getCurrentUser();
      String username = "testappenduser";
      String group = "testappendgroup";
      // Make sure the test user is genuinely distinct from the superuser.
      assertFalse(superuser.getShortUserName().equals(username));
      assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
      UserGroupInformation appenduser = UserGroupInformation
          .createUserForTesting(username, new String[] { group });
      fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
      Path dir = new Path(root, getClass().getSimpleName());
      Path foo = new Path(dir, "foo.dat");
      FSDataOutputStream out = null;
      int offset = 0;
      try {
        out = fs.create(foo);
        int len = 10 + AppendTestUtil.nextInt(100);
        out.write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.closeStream(out);
      }
      // Owner write on the file (0200) plus execute-only on the directory
      // (0100) is sufficient to append.
      fs.setPermission(dir, new FsPermission((short) 0100));
      fs.setPermission(foo, new FsPermission((short) 0200));
      out = null;
      try {
        out = fs.append(foo);
        int len = 10 + AppendTestUtil.nextInt(100);
        out.write(fileContents, offset, len);
        offset += len;
      } finally {
        IOUtils.closeStream(out);
      }
      // 0577 removes the owner's write bit, so append must now fail.
      fs.setPermission(foo, new FsPermission((short) 0577));
      fs.setPermission(dir, new FsPermission((short) 0777));
      out = null;
      try {
        out = fs.append(foo);
        fail("Expected to have AccessControlException");
      } catch (AccessControlException ace) {
        System.out.println("Good: got " + ace);
        ace.printStackTrace(System.out);
      } finally {
        IOUtils.closeStream(out);
      }
    }
  } catch (IOException e) {
    System.out.println("Exception :" + e);
    throw e;
  } catch (Throwable e) {
    System.out.println("Throwable :" + e);
    e.printStackTrace();
    throw new IOException("Throwable : " + e);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Class: org.apache.hadoop.hdfs.TestFileAppend3
APIUtilityVerifierIterativeVerifierBranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * TC11: Racing rename
 * @throws IOException an exception might be thrown
 */
@Test
public void testTC11() throws Exception {
  final Path p = new Path("/TC11/foo");
  System.out.println("p=" + p);

  // Write exactly one full block, then close.
  final int len1 = (int) BLOCK_SIZE;
  FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
  AppendTestUtil.write(out, 0, len1);
  out.close();

  // Re-open for append, write half a block, and hflush so the data is
  // pushed to the pipeline while the stream stays open.
  out = fs.append(p);
  final int len2 = (int) BLOCK_SIZE / 2;
  AppendTestUtil.write(out, len1, len2);
  out.hflush();

  // Rename the file while the appender still has it open, then close.
  final Path pnew = new Path(p + ".new");
  assertTrue(fs.rename(p, pnew));
  out.close();

  // Verify each block replica on every datanode reports the size the
  // namenode believes the block has.
  final long len = fs.getFileStatus(pnew).getLen();
  final LocatedBlocks blocks =
      fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
  final int blockCount = blocks.locatedBlockCount();
  for (int idx = 0; idx < blockCount; idx++) {
    final LocatedBlock located = blocks.get(idx);
    final ExtendedBlock extBlock = located.getBlock();
    final long expectedSize = located.getBlockSize();
    if (idx < blockCount - 1) {
      // All blocks except the last must be full.
      assertEquals(BLOCK_SIZE, expectedSize);
    }
    for (DatanodeInfo dnInfo : located.getLocations()) {
      final DataNode dn = cluster.getDataNode(dnInfo.getIpcPort());
      final Block stored = DataNodeTestUtils.getFSDataset(dn)
          .getStoredBlock(extBlock.getBlockPoolId(), extBlock.getBlockId());
      assertEquals(expectedSize, stored.getNumBytes());
    }
  }
}
Class: org.apache.hadoop.hdfs.TestFileAppend4
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();
  // Speed up dead-datanode detection so the test completes quickly.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  DistributedFileSystem fileSystem = null;
  try {
    fileSystem = cluster.getFileSystem();
    // Create a 2-replica file.
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
    LocatedBlocks lbs = fileSystem.dfs.getNamenode()
        .getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    // Shut down exactly those datanodes that hold replicas of the last
    // block. (Parameterized list: the raw List broke the enhanced-for
    // over DataNode.)
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
    // With no live replica locations, append must fail.
    try {
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }
    // The failed append must not leave the file under construction.
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode =
        INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, and then tries to recover
 * the lease from another thread.
 */
@Test(timeout = 60000)
public void testRecoverFinalizedBlock() throws Throwable {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  try {
    cluster.waitActive();
    // Spy on the NN so complete() can be held at a latch.
    NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
    NamenodeProtocols spyNN = spy(preSpyNN);
    GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    doAnswer(delayer).when(spyNN).complete(anyString(), anyString(),
        (ExtendedBlock) anyObject(), anyLong());
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    file1 = new Path("/testRecoverFinalized");
    final OutputStream stm = client.create("/testRecoverFinalized", true);
    AppendTestUtil.write(stm, 0, 4096);
    // Parameterized reference: with the raw AtomicReference, err.get()
    // returned Object, which cannot be assigned to Throwable below.
    final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
    // The close happens on a separate thread so this thread can recover
    // the lease while complete() is blocked in the delayer.
    Thread t = new Thread() {
      @Override
      public void run() {
        try {
          stm.close();
        } catch (Throwable t) {
          err.set(t);
        }
      }
    };
    t.start();
    LOG.info("Waiting for close to get to latch...");
    delayer.waitForCall();
    LOG.info("Killing lease checker");
    client.getLeaseRenewer().interruptAndJoin();
    FileSystem fs1 = cluster.getFileSystem();
    FileSystem fs2 =
        AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    LOG.info("Recovering file");
    recoverFile(fs2);
    LOG.info("Telling close to proceed.");
    delayer.proceed();
    LOG.info("Waiting for close to finish.");
    t.join();
    LOG.info("Close finished.");
    // The delayed close must have failed: the lease was recovered away.
    Throwable thrownByClose = err.get();
    assertNotNull(thrownByClose);
    assertTrue(thrownByClose instanceof IOException);
    if (!thrownByClose.getMessage().contains(
        "No lease on /testRecoverFinalized")) {
      throw thrownByClose;
    }
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * Test case that stops a writer after finalizing a block but
 * before calling completeFile, recovers a file from another writer,
 * starts writing from that writer, and then has the old lease holder
 * call completeFile
 */
@Test(timeout = 60000)
public void testCompleteOtherLeaseHoldersFile() throws Throwable {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
  try {
    cluster.waitActive();
    // Spy on the NN so complete() can be held at a latch.
    NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
    NamenodeProtocols spyNN = spy(preSpyNN);
    GenericTestUtils.DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    doAnswer(delayer).when(spyNN).complete(anyString(), anyString(),
        (ExtendedBlock) anyObject(), anyLong());
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    file1 = new Path("/testCompleteOtherLease");
    final OutputStream stm = client.create("/testCompleteOtherLease", true);
    AppendTestUtil.write(stm, 0, 4096);
    // Parameterized reference: with the raw AtomicReference, err.get()
    // returned Object, which cannot be assigned to Throwable below.
    final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
    // Close on a separate thread so the main thread can steal the lease
    // while complete() is blocked in the delayer.
    Thread t = new Thread() {
      @Override
      public void run() {
        try {
          stm.close();
        } catch (Throwable t) {
          err.set(t);
        }
      }
    };
    t.start();
    LOG.info("Waiting for close to get to latch...");
    delayer.waitForCall();
    LOG.info("Killing lease checker");
    client.getLeaseRenewer().interruptAndJoin();
    FileSystem fs1 = cluster.getFileSystem();
    FileSystem fs2 =
        AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
    LOG.info("Recovering file");
    recoverFile(fs2);
    // The new writer takes over the lease and appends some data.
    LOG.info("Opening file for append from new fs");
    FSDataOutputStream appenderStream = fs2.append(file1);
    LOG.info("Writing some data from new appender");
    AppendTestUtil.write(appenderStream, 0, 4096);
    LOG.info("Telling old close to proceed.");
    delayer.proceed();
    LOG.info("Waiting for close to finish.");
    t.join();
    LOG.info("Close finished.");
    // The old holder's delayed complete() must fail with a lease mismatch.
    Throwable thrownByClose = err.get();
    assertNotNull(thrownByClose);
    assertTrue(thrownByClose instanceof IOException);
    if (!thrownByClose.getMessage().contains("Lease mismatch")) {
      throw thrownByClose;
    }
    appenderStream.close();
  } finally {
    cluster.shutdown();
  }
}
UtilityVerifierNullVerifierHybridVerifier
/**
 * A file being actively written and hflushed must be immediately openable
 * by a concurrent reader: an opener thread repeatedly opens/closes the file
 * while a writer thread keeps writing, and any opener failure is recorded
 * and asserted at the end.
 */
@Test(timeout = 30000)
public void testImmediateReadOfNewFile() throws IOException {
  final int blockSize = 64 * 1024;
  final int writeSize = 10 * blockSize;
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  init(conf);
  final int requiredSuccessfulOpens = 100;
  final Path file = new Path("/file1");
  final AtomicBoolean openerDone = new AtomicBoolean(false);
  // Parameterized reference: the raw AtomicReference made get() return
  // Object, which does not match the assertNull(String, Object) overload
  // used at the bottom of the test.
  final AtomicReference<String> errorMessage = new AtomicReference<String>();
  final FSDataOutputStream out = fileSystem.create(file);
  // Writer: keep writing and hflushing until the opener finishes or fails.
  final Thread writer = new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        while (!openerDone.get()) {
          out.write(DFSTestUtil.generateSequentialBytes(0, writeSize));
          out.hflush();
        }
      } catch (IOException e) {
        LOG.warn("error in writer", e);
      } finally {
        try {
          out.close();
        } catch (IOException e) {
          LOG.error("unable to close file");
        }
      }
    }
  });
  // Opener: repeatedly open/close the file while it is being written.
  Thread opener = new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        for (int i = 0; i < requiredSuccessfulOpens; i++) {
          fileSystem.open(file).close();
        }
        openerDone.set(true);
      } catch (IOException e) {
        openerDone.set(true);
        errorMessage.set(String.format("got exception : %s",
            StringUtils.stringifyException(e)));
      } catch (Exception e) {
        // Unexpected (non-IO) failure: also stop the writer immediately.
        openerDone.set(true);
        errorMessage.set(String.format("got exception : %s",
            StringUtils.stringifyException(e)));
        writer.interrupt();
        fail("here");
      }
    }
  });
  writer.start();
  opener.start();
  try {
    writer.join();
    opener.join();
  } catch (InterruptedException e) {
    // Preserve the interrupt for callers; the assertion below still runs.
    Thread.currentThread().interrupt();
  }
  assertNull(errorMessage.get(), errorMessage.get());
}
Class: org.apache.hadoop.hdfs.TestFileCorruption
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds does not thrown.
 * See Hadoop-4351.
 */
@Test
public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    final Path FILE_PATH = new Path("/tmp.txt");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);
    // Locate the block file on the first datanode; it may live in either
    // of the node's two storage directories.
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("Data directory does not exist", dataDir.exists());
    ExtendedBlock blk = getBlock(bpid, dataDir);
    if (blk == null) {
      storageDir = cluster.getInstanceStorageDir(0, 1);
      dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      blk = getBlock(bpid, dataDir);
    }
    assertFalse("Data directory does not contain any blocks or there was an "
        + "IO error", blk == null);
    // Start a third datanode that holds no replica of the block.
    cluster.startDataNodes(conf, 1, true, null, null);
    // Parameterized list (the raw ArrayList made get(2) return Object) and
    // JUnit argument order fixed: the expected value comes first.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(3, datanodes.size());
    DataNode dataNode = datanodes.get(2);
    DatanodeRegistration dnR =
        DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
    // Report the block corrupt from the replica-less node while holding the
    // namesystem write lock; this must not throw ArrayIndexOutOfBounds.
    FSNamesystem ns = cluster.getNamesystem();
    ns.writeLock();
    try {
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
          blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
    } finally {
      ns.writeUnlock();
    }
    // The file must still be readable and deletable afterwards.
    fs.open(FILE_PATH);
    fs.delete(FILE_PATH, false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Class: org.apache.hadoop.hdfs.TestFileCreation
APIUtilityVerifierBranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Create a file, write something, hflush but not close.
 * Then change lease period and wait for lease recovery.
 * Finally, read the block directly from each Datanode and verify the content.
 */
@Test
public void testLeaseExpireHardLimit() throws Exception {
  System.out.println("testLeaseExpireHardLimit start");
  final long leasePeriod = 1000;
  final int DATANODE_NUM = 3;
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    final String f = DIR + "foo";
    final Path fpath = new Path(f);
    // Write and hflush but do NOT close, so the lease stays open.
    HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
    out.write("something".getBytes());
    out.hflush();
    int actualRepl = out.getCurrentBlockReplication();
    // assertEquals reports both values on failure, unlike assertTrue on ==.
    assertEquals(f + " should be replicated to " + DATANODE_NUM + " datanodes.",
        DATANODE_NUM, actualRepl);
    // Shrink both lease limits, then wait long enough for the hard limit
    // to expire so the NN recovers the lease and closes the file.
    cluster.setLeasePeriod(leasePeriod, leasePeriod);
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
      // best-effort wait; proceed and let the assertions below decide
    }
    LocatedBlocks locations =
        dfs.dfs.getNamenode().getBlockLocations(f, 0, Long.MAX_VALUE);
    assertEquals(1, locations.locatedBlockCount());
    LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
    int successcount = 0;
    for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
      DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
      ExtendedBlock blk = locatedblock.getBlock();
      Block b = DataNodeTestUtils.getFSDataset(datanode)
          .getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
      final File blockfile = DataNodeTestUtils.getFile(datanode,
          blk.getBlockPoolId(), b.getBlockId());
      System.out.println("blockfile=" + blockfile);
      if (blockfile != null) {
        // Close the reader even on assertion failure (the original leaked
        // the stream when readLine() did not match).
        BufferedReader in = new BufferedReader(new FileReader(blockfile));
        try {
          assertEquals("something", in.readLine());
          successcount++;
        } finally {
          in.close();
        }
      }
    }
    System.out.println("successcount=" + successcount);
    // Lease recovery must have persisted the data on at least one datanode.
    assertTrue(successcount > 0);
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
  System.out.println("testLeaseExpireHardLimit successful");
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test that file leases are persisted across namenode restarts.
 *
 * Opens several files for write, renames some of them (and an ancestor
 * directory) while they are still open, restarts the namenode twice
 * without reformatting, then completes the writes and verifies the block
 * allocations survived the restarts.
 */
@Test public void testFileCreationNamenodeRestart() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
// Short idle/heartbeat intervals so client connections are torn down
// quickly around the restarts below.
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs=null;
try {
cluster.waitActive();
fs=cluster.getFileSystem();
final int nnport=cluster.getNameNodePort();
// Create a file, write and hflush, keeping the stream open so the
// namenode holds a lease for it.
Path file1=new Path("/filestatus.dat");
HdfsDataOutputStream stm=create(fs,file1,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);
assertEquals(file1 + " should be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
writeFile(stm,numBlocks * blockSize);
stm.hflush();
assertEquals(file1 + " should still be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
// Rename the still-open file; its lease must follow the rename.
Path fileRenamed=new Path("/filestatusRenamed.dat");
fs.rename(file1,fileRenamed);
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to "+ fileRenamed);
file1=fileRenamed;
// Open more files; two of them live under a directory that is renamed
// while they are open.
Path file2=new Path("/filestatus2.dat");
FSDataOutputStream stm2=createFile(fs,file2,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);
Path file3=new Path("/user/home/fullpath.dat");
FSDataOutputStream stm3=createFile(fs,file3,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
Path file4=new Path("/user/home/fullpath4.dat");
FSDataOutputStream stm4=createFile(fs,file4,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);
fs.mkdirs(new Path("/bin"));
fs.rename(new Path("/user/home"),new Path("/bin"));
Path file3new=new Path("/bin/home/fullpath.dat");
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to "+ file3new);
Path file4new=new Path("/bin/home/fullpath4.dat");
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to "+ file4new);
// First restart: same port, no reformat, so leases and under-construction
// files must be recovered from persisted state.
cluster.shutdown();
try {
Thread.sleep(2 * MAX_IDLE_TIME);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
// Second restart to verify the lease state survives repeated restarts.
cluster.shutdown();
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
fs=cluster.getFileSystem();
// Point the still-open client streams at their (possibly renamed) paths
// on the new namenode instance before completing the writes.
DFSOutputStream dfstream=(DFSOutputStream)(stm.getWrappedStream());
dfstream.setTestFilename(file1.toString());
dfstream=(DFSOutputStream)(stm3.getWrappedStream());
dfstream.setTestFilename(file3new.toString());
dfstream=(DFSOutputStream)(stm4.getWrappedStream());
dfstream.setTestFilename(file4new.toString());
// Finish the writes; closing releases the leases on the new namenode.
byte[] buffer=AppendTestUtil.randomBytes(seed,1);
stm.write(buffer);
stm.close();
stm2.write(buffer);
stm2.close();
stm3.close();
stm4.close();
// Verify that the expected block counts survived both restarts.
DFSClient client=fs.dfs;
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file1,locations.locatedBlockCount() == 3);
locations=client.getNamenode().getBlockLocations(file2.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file2,locations.locatedBlockCount() == 1);
}
finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
UtilityVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Same test as checkFileCreation, but with the client bound to a local
 * network interface. A valid interface name must work; a nonexistent one
 * must be rejected with {@link UnknownHostException}.
 */
@Test public void testFileCreationSetLocalInterface() throws IOException {
  // Interface naming ("lo") is Linux-specific; skip elsewhere.
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));

  // Binding to the loopback interface must succeed.
  checkFileCreation("lo",false);

  // A made-up interface name must fail with a descriptive message.
  try {
    checkFileCreation("bogus-interface",false);
    fail("Able to specify a bogus interface");
  } catch (UnknownHostException e) {
    assertEquals("No such interface bogus-interface",e.getMessage());
  }
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests the fileLength when we sync the file and restart the cluster and
 * Datanodes not report to Namenode yet.
 */
@Test(timeout=60000) public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister() throws Exception {
final Configuration conf=new HdfsConfiguration();
// Small block size so the 1030-byte write spans multiple blocks.
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,512);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
HdfsDataInputStream in=null;
try {
Path path=new Path("/tmp/TestFileLengthOnClusterRestart","test");
DistributedFileSystem dfs=cluster.getFileSystem();
FSDataOutputStream out=dfs.create(path);
int fileLength=1030;
// hsync (but do not close) so the length is durable on the datanodes.
out.write(new byte[fileLength]);
out.hsync();
cluster.restartNameNode();
cluster.waitActive();
// After a namenode restart with live datanodes, the hsync'ed length must
// be visible to a reader.
in=(HdfsDataInputStream)dfs.open(path,1024);
Assert.assertEquals(fileLength,in.getVisibleLength());
// Now restart the namenode with all datanodes down; it must stay in safe
// mode and refuse the open.
cluster.shutdownDataNodes();
cluster.restartNameNode(false);
verifyNNIsInSafeMode(dfs);
try {
in=(HdfsDataInputStream)dfs.open(path);
Assert.fail("Expected IOException");
}
catch ( IOException e) {
Assert.assertTrue(e.getLocalizedMessage().indexOf("Name node is in safe mode") >= 0);
}
}
finally {
if (null != in) {
in.close();
}
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestFileStatus
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the FileStatus obtained calling listStatus on a file.
 * listStatus on a plain file must return exactly one entry describing the
 * file itself, and the FileContext view must agree with the FileSystem view.
 */
@Test public void testListStatusOnFile() throws IOException {
  FileStatus[] stats=fs.listStatus(file1);
  assertEquals(1,stats.length);
  FileStatus status=stats[0];
  assertFalse(file1 + " should be a file",status.isDirectory());
  assertEquals(blockSize,status.getBlockSize());
  assertEquals(1,status.getReplication());
  assertEquals(fileSize,status.getLen());
  assertEquals(file1.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),status.getPath().toString());
  // Typed iterator: a raw RemoteIterator would not compile here since
  // next() is assigned to a FileStatus.
  RemoteIterator<FileStatus> itor=fc.listStatus(file1);
  status=itor.next();
  assertEquals(stats[0],status);
  assertFalse(file1 + " should be a file",status.isDirectory());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the FileStatus obtained calling getFileStatus on a file: size,
 * replication, block size, and fully-qualified path must all match.
 */
@Test public void testGetFileStatusOnFile() throws Exception {
  checkFile(fs,file1,1);
  final FileStatus fileStat=fs.getFileStatus(file1);
  assertFalse(file1 + " should be a file",fileStat.isDirectory());
  assertEquals(blockSize,fileStat.getBlockSize());
  assertEquals(1,fileStat.getReplication());
  assertEquals(fileSize,fileStat.getLen());
  final String qualified=
      file1.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString();
  assertEquals(qualified,fileStat.getPath().toString());
}
UtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test getting a FileStatus object using a non-existent path: listStatus
 * (via both FileSystem and FileContext) and getFileStatus must all throw
 * {@link FileNotFoundException} with a descriptive message.
 */
@Test public void testGetFileStatusOnNonExistantFileDir() throws IOException {
  final Path missing=new Path("/test/mkdirs");

  // listStatus through the FileSystem API.
  try {
    fs.listStatus(missing);
    fail("listStatus of non-existent path should fail");
  } catch (FileNotFoundException fe) {
    assertEquals("File " + missing + " does not exist.",fe.getMessage());
  }

  // listStatus through the FileContext API.
  try {
    fc.listStatus(missing);
    fail("listStatus of non-existent path should fail");
  } catch (FileNotFoundException fe) {
    assertEquals("File " + missing + " does not exist.",fe.getMessage());
  }

  // getFileStatus only guarantees a message prefix, not an exact string.
  try {
    fs.getFileStatus(missing);
    fail("getFileStatus of non-existent path should fail");
  } catch (FileNotFoundException fe) {
    assertTrue("Exception doesn't indicate non-existant path",fe.getMessage().startsWith("File does not exist"));
  }
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test calling getFileInfo directly on the client: missing paths yield
 * null, child counts are reported, and non-absolute paths are rejected.
 */
@Test public void testGetFileInfo() throws IOException {
  // The root must exist and be a directory.
  assertTrue("/ should be a directory",fs.getFileStatus(new Path("/")).isDirectory());

  // A missing path yields null rather than an exception.
  assertEquals("Non-existant file should result in null",null,
      dfsClient.getFileInfo("/noSuchFile"));

  // Build a directory with a single (empty) child file and check the
  // reported child counts.
  final Path parentDir=new Path("/name1");
  final Path childFile=new Path("/name1/name2");
  assertTrue(fs.mkdirs(parentDir));
  fs.create(childFile,false).close();
  assertEquals(1,dfsClient.getFileInfo(parentDir.toString()).getChildrenNum());
  assertEquals(0,dfsClient.getFileInfo(childFile.toString()).getChildrenNum());

  // Relative paths must be rejected server-side.
  try {
    dfsClient.getFileInfo("non-absolute");
    fail("getFileInfo for a non-absolute path did not throw IOException");
  } catch (RemoteException re) {
    assertTrue("Wrong exception for invalid file name",
        re.toString().contains("Invalid file name"));
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test FileStatus objects obtained from a directory as it is populated:
 * empty, then with files, then with subdirectories. Both the FileSystem
 * array view and the FileContext iterator view must agree and list entries
 * in sorted order.
 */
@Test public void testGetFileStatusOnDir() throws Exception {
  // Fresh, empty directory.
  Path dir=new Path("/test/mkdirs");
  assertTrue("mkdir failed",fs.mkdirs(dir));
  assertTrue("mkdir failed",fs.exists(dir));
  FileStatus status=fs.getFileStatus(dir);
  assertTrue(dir + " should be a directory",status.isDirectory());
  assertTrue(dir + " should be zero size ",status.getLen() == 0);
  assertEquals(dir.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),status.getPath().toString());
  FileStatus[] stats=fs.listStatus(dir);
  assertEquals(dir + " should be empty",0,stats.length);
  assertEquals(dir + " should be zero size ",0,fs.getContentSummary(dir).getLength());
  // Typed iterator: a raw RemoteIterator would not compile where next()
  // results are used as FileStatus below.
  RemoteIterator<FileStatus> itor=fc.listStatus(dir);
  assertFalse(dir + " should be empty",itor.hasNext());
  // Add one file and re-check.
  Path file2=new Path(dir,"filestatus2.dat");
  DFSTestUtil.createFile(fs,file2,blockSize / 4,blockSize / 4,blockSize,(short)1,seed);
  checkFile(fs,file2,1);
  status=fs.getFileStatus(file2);
  assertEquals(blockSize,status.getBlockSize());
  assertEquals(1,status.getReplication());
  file2=fs.makeQualified(file2);
  assertEquals(file2.toString(),status.getPath().toString());
  // Add a second file; the directory size is the sum of both files.
  Path file3=new Path(dir,"filestatus3.dat");
  DFSTestUtil.createFile(fs,file3,blockSize / 4,blockSize / 4,blockSize,(short)1,seed);
  checkFile(fs,file3,1);
  file3=fs.makeQualified(file3);
  final int expected=blockSize / 2;
  assertEquals(dir + " size should be " + expected,expected,fs.getContentSummary(dir).getLength());
  stats=fs.listStatus(dir);
  assertEquals(dir + " should have two entries",2,stats.length);
  assertEquals(file2.toString(),stats[0].getPath().toString());
  assertEquals(file3.toString(),stats[1].getPath().toString());
  itor=fc.listStatus(dir);
  assertEquals(file2.toString(),itor.next().getPath().toString());
  assertEquals(file3.toString(),itor.next().getPath().toString());
  assertFalse("Unexpected addtional file",itor.hasNext());
  // Add a subdirectory; directories sort before the files here.
  Path dir3=fs.makeQualified(new Path(dir,"dir3"));
  fs.mkdirs(dir3);
  dir3=fs.makeQualified(dir3);
  stats=fs.listStatus(dir);
  assertEquals(dir + " should have three entries",3,stats.length);
  assertEquals(dir3.toString(),stats[0].getPath().toString());
  assertEquals(file2.toString(),stats[1].getPath().toString());
  assertEquals(file3.toString(),stats[2].getPath().toString());
  itor=fc.listStatus(dir);
  assertEquals(dir3.toString(),itor.next().getPath().toString());
  assertEquals(file2.toString(),itor.next().getPath().toString());
  assertEquals(file3.toString(),itor.next().getPath().toString());
  assertFalse("Unexpected addtional file",itor.hasNext());
  // Two more subdirectories; listing order stays sorted.
  Path dir4=fs.makeQualified(new Path(dir,"dir4"));
  fs.mkdirs(dir4);
  dir4=fs.makeQualified(dir4);
  Path dir5=fs.makeQualified(new Path(dir,"dir5"));
  fs.mkdirs(dir5);
  dir5=fs.makeQualified(dir5);
  stats=fs.listStatus(dir);
  assertEquals(dir + " should have five entries",5,stats.length);
  assertEquals(dir3.toString(),stats[0].getPath().toString());
  assertEquals(dir4.toString(),stats[1].getPath().toString());
  assertEquals(dir5.toString(),stats[2].getPath().toString());
  assertEquals(file2.toString(),stats[3].getPath().toString());
  assertEquals(file3.toString(),stats[4].getPath().toString());
  itor=fc.listStatus(dir);
  assertEquals(dir3.toString(),itor.next().getPath().toString());
  assertEquals(dir4.toString(),itor.next().getPath().toString());
  assertEquals(dir5.toString(),itor.next().getPath().toString());
  assertEquals(file2.toString(),itor.next().getPath().toString());
  assertEquals(file3.toString(),itor.next().getPath().toString());
  assertFalse(itor.hasNext());
  fs.delete(dir,true);
}
Class: org.apache.hadoop.hdfs.TestGetBlocks
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test if the datanodes returned by
 * {@link ClientProtocol#getBlockLocations(String,long,long)} is correct
 * when stale nodes checking is enabled. Also test during the scenario when
 * 1) stale nodes checking is enabled, 2) a writing is going on, 3) a
 * datanode becomes stale happen simultaneously.
 * @throws Exception
 */
@Test public void testReadSelectNonStaleDatanode() throws Exception {
  HdfsConfiguration conf=new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,true);
  // 30-minute staleness interval: nodes only become stale when we force it.
  long staleInterval=30 * 1000 * 60;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,staleInterval);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).racks(racks).build();
  cluster.waitActive();
  InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
  DFSClient client=new DFSClient(addr,conf);
  // Typed list instead of a raw List (unchecked).
  List<DatanodeDescriptor> nodeInfoList=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeListForReport(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of datanodes",numDatanodes,nodeInfoList.size());
  FileSystem fileSys=cluster.getFileSystem();
  FSDataOutputStream stm=null;
  try {
    // Write one and a half blocks so the last block stays under
    // construction while we manipulate staleness.
    final Path fileName=new Path("/file1");
    stm=fileSys.create(fileName,true,fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096),(short)3,blockSize);
    stm.write(new byte[(blockSize * 3) / 2]);
    stm.hflush();
    LocatedBlocks blocks=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize);
    DatanodeInfo[] nodes=blocks.get(0).getLocations();
    assertEquals(3,nodes.length);
    DataNode staleNode=null;
    DatanodeDescriptor staleNodeInfo=null;
    // Make the first replica's node stale: stop its heartbeats and age its
    // last-update timestamp past the stale interval.
    staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName());
    assertNotNull(staleNode);
    staleNodeInfo=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId());
    staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
    // The stale node must now be sorted to the end of the locations.
    LocatedBlocks blocksAfterStale=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize);
    DatanodeInfo[] nodesAfterStale=blocksAfterStale.get(0).getLocations();
    assertEquals(3,nodesAfterStale.length);
    assertEquals(nodes[0].getHostName(),nodesAfterStale[2].getHostName());
    // Revive the node, then repeat the exercise on the under-construction
    // last block.
    DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode,false);
    staleNodeInfo.setLastUpdate(Time.now());
    LocatedBlock lastBlock=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock();
    nodes=lastBlock.getLocations();
    assertEquals(3,nodes.length);
    staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName());
    assertNotNull(staleNode);
    cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
    LocatedBlock lastBlockAfterStale=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock();
    nodesAfterStale=lastBlockAfterStale.getLocations();
    assertEquals(3,nodesAfterStale.length);
    assertEquals(nodes[0].getHostName(),nodesAfterStale[2].getHostName());
  }
  finally {
    if (stm != null) {
      stm.close();
    }
    if (client != null) {
      client.close();
    }
    cluster.shutdown();
  }
}
Class: org.apache.hadoop.hdfs.TestHdfsAdmin
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}. Walks the
 * directory through every combination of namespace/space quota being set
 * or cleared, verifying the reported values at each step.
 */
@Test public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
  FileSystem fs=null;
  try {
    fs=FileSystem.get(conf);
    assertTrue(fs.mkdirs(TEST_PATH));
    // Fresh directory: neither quota is set (-1 means "no quota").
    checkQuotas(fs,-1,-1);
    dfsAdmin.setSpaceQuota(TEST_PATH,10);
    checkQuotas(fs,-1,10);
    dfsAdmin.setQuota(TEST_PATH,10);
    checkQuotas(fs,10,10);
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    checkQuotas(fs,10,-1);
    dfsAdmin.clearQuota(TEST_PATH);
    checkQuotas(fs,-1,-1);
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}

/** Asserts the current namespace and space quotas reported for TEST_PATH. */
private void checkQuotas(FileSystem fs,long nameQuota,long spaceQuota) throws IOException {
  assertEquals(nameQuota,fs.getContentSummary(TEST_PATH).getQuota());
  assertEquals(spaceQuota,fs.getContentSummary(TEST_PATH).getSpaceQuota());
}
Class: org.apache.hadoop.hdfs.TestLease
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@SuppressWarnings("unchecked") @Test public void testFactory() throws Exception {
// Verifies the LeaseRenewer factory: clients created for the same user
// share one renewer instance, while different users get distinct ones.
final String[] groups=new String[]{"supergroup"};
final UserGroupInformation[] ugi=new UserGroupInformation[3];
for (int i=0; i < ugi.length; i++) {
ugi[i]=UserGroupInformation.createUserForTesting("user" + i,groups);
}
// Stub the mock namenode so getFileInfo/create succeed without a cluster.
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).getFileInfo(anyString());
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).create(anyString(),(FsPermission)anyObject(),anyString(),(EnumSetWritable)anyObject(),anyBoolean(),anyShort(),anyLong(),(List)anyList());
final Configuration conf=new Configuration();
// Two clients for user0 must share the same renewer.
final DFSClient c1=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out1=createFsOut(c1,"/out1");
final DFSClient c2=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out2=createFsOut(c2,"/out2");
Assert.assertEquals(c1.getLeaseRenewer(),c2.getLeaseRenewer());
// user1 gets a different renewer than user0 ...
final DFSClient c3=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out3=createFsOut(c3,"/out3");
Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
// ... shared with a second user1 client.
final DFSClient c4=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out4=createFsOut(c4,"/out4");
Assert.assertEquals(c3.getLeaseRenewer(),c4.getLeaseRenewer());
// user2's renewer is distinct from both previous users'.
final DFSClient c5=createDFSClientAs(ugi[2],conf);
FSDataOutputStream out5=createFsOut(c5,"/out5");
Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that we can open up a file for write, move it to another location,
 * and then create a new file in the previous location, without causing any
 * lease conflicts. This is possible because we now use unique inode IDs
 * to identify files to the NameNode.
 */
@Test public void testLeaseAfterRenameAndRecreate() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
final Path path1=new Path("/test-file");
final String contents1="contents1";
final Path path2=new Path("/test-file-new-location");
final String contents2="contents2";
// Open path1 for write and leave it open so a lease is held on it.
FileSystem fs=cluster.getFileSystem();
FSDataOutputStream out1=fs.create(path1);
out1.writeBytes(contents1);
Assert.assertTrue(hasLease(cluster,path1));
Assert.assertEquals(1,leaseCount(cluster));
// From a second client: move the open file away, then create a brand-new
// file at the old location. Leases are tracked by inode ID, so this must
// not conflict with the still-open out1.
DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
fs2.rename(path1,path2);
FSDataOutputStream out2=fs2.create(path1);
out2.writeBytes(contents2);
out2.close();
// The original lease must have followed the rename to path2.
Assert.assertTrue(hasLease(cluster,path2));
out1.close();
// Each file must contain the data written through its own stream.
DistributedFileSystem fs3=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
Assert.assertEquals(contents1,DFSTestUtil.readFile(fs3,path2));
Assert.assertEquals(contents2,DFSTestUtil.readFile(fs3,path1));
}
finally {
cluster.shutdown();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testLeaseAfterRename() throws Exception {
// Verifies that the lease on an open file follows the file through a
// series of renames — of the file itself, of its parent directory, and
// via Rename.OVERWRITE — and that the lease count stays at exactly one.
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
Path p=new Path("/test-file");
Path d=new Path("/test-d");
Path d2=new Path("/test-d-other");
// Open the file for write so a lease is held on it throughout the test.
FileSystem fs=cluster.getFileSystem();
FSDataOutputStream out=fs.create(p);
out.writeBytes("something");
Assert.assertTrue(hasLease(cluster,p));
Assert.assertEquals(1,leaseCount(cluster));
// All renames are issued through a second client instance.
DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
LOG.info("DMS: rename file into dir");
// Rename the open file into a directory: lease moves to the new path.
Path pRenamed=new Path(d,p.getName());
fs2.mkdirs(d);
fs2.rename(p,pRenamed);
Assert.assertFalse(p + " exists",fs2.exists(p));
Assert.assertTrue(pRenamed + " not found",fs2.exists(pRenamed));
Assert.assertFalse("has lease for " + p,hasLease(cluster,p));
Assert.assertTrue("no lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertEquals(1,leaseCount(cluster));
LOG.info("DMS: rename parent dir");
// Rename the parent directory: the lease must track the file's new
// absolute path under the renamed parent.
Path pRenamedAgain=new Path(d2,pRenamed.getName());
fs2.rename(d,d2);
Assert.assertFalse(d + " exists",fs2.exists(d));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d2 + " not found",fs2.exists(d2));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
LOG.info("DMS: rename parent again");
// Rename the parent into a freshly recreated directory.
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(new Path(d,d2.getName()),p.getName());
fs2.mkdirs(d);
fs2.rename(d2,d);
Assert.assertFalse(d2 + " exists",fs2.exists(d2));
Assert.assertFalse("no lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d + " not found",fs2.exists(d));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
// Rename the parent with OVERWRITE semantics onto an existing target.
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(d2,p.getName());
fs2.rename(pRenamed.getParent(),d2,Options.Rename.OVERWRITE);
Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d2 + " not found",fs2.exists(d2));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
// And once more, overwriting back onto the original directory.
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(d,p.getName());
fs2.rename(pRenamed.getParent(),d,Options.Rename.OVERWRITE);
Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d + " not found",fs2.exists(d));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
out.close();
}
finally {
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestLeaseRecovery
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored block.
 */
@Test public void testBlockSynchronization() throws Exception {
final int ORG_FILE_SIZE=3000;
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
cluster.waitActive();
// Create a multi-block file and wait for full replication.
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,ORG_FILE_SIZE,REPLICATION_NUM,0L);
assertTrue(dfs.exists(filepath));
DFSTestUtil.waitReplication(dfs,filepath,REPLICATION_NUM);
// Resolve the datanodes that hold the last block.
LocatedBlock locatedblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr);
DatanodeInfo[] datanodeinfos=locatedblock.getLocations();
assertEquals(REPLICATION_NUM,datanodeinfos.length);
DataNode[] datanodes=new DataNode[REPLICATION_NUM];
for (int i=0; i < REPLICATION_NUM; i++) {
datanodes[i]=cluster.getDataNode(datanodeinfos[i].getIpcPort());
assertTrue(datanodes[i] != null);
}
ExtendedBlock lastblock=locatedblock.getBlock();
DataNode.LOG.info("newblocks=" + lastblock);
for (int i=0; i < REPLICATION_NUM; i++) {
checkMetaInfo(lastblock,datanodes[i]);
}
// Reopen the file via append, then let the lease expire so the namenode
// triggers block synchronization (lease recovery).
DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
cluster.getNameNodeRpc().append(filestr,dfs.dfs.clientName);
waitLeaseRecovery(cluster);
// After recovery, every replica must agree on block id, length, and the
// (possibly bumped) generation stamp.
Block[] updatedmetainfo=new Block[REPLICATION_NUM];
long oldSize=lastblock.getNumBytes();
lastblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr).getBlock();
long currentGS=lastblock.getGenerationStamp();
for (int i=0; i < REPLICATION_NUM; i++) {
updatedmetainfo[i]=DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(lastblock.getBlockPoolId(),lastblock.getBlockId());
assertEquals(lastblock.getBlockId(),updatedmetainfo[i].getBlockId());
assertEquals(oldSize,updatedmetainfo[i].getNumBytes());
assertEquals(currentGS,updatedmetainfo[i].getGenerationStamp());
}
// Lease recovery must NOT run while the namenode is in safe mode: the
// lease for the new open file must still exist after the wait.
System.out.println("Testing that lease recovery cannot happen during safemode.");
filestr="/foo.safemode";
filepath=new Path(filestr);
dfs.create(filepath,(short)1);
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER,false);
assertTrue(dfs.dfs.exists(filestr));
DFSTestUtil.waitReplication(dfs,filepath,(short)1);
waitLeaseRecovery(cluster);
LeaseManager lm=NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
assertTrue("Found " + lm.countLease() + " lease, expected 1",lm.countLease() == 1);
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,false);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Class: org.apache.hadoop.hdfs.TestLeaseRecovery2
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * This test makes the client does not renew its lease and also
 * set the hard lease expiration period to be short 1s. Thus triggering
 * lease expiration to happen while the client is still alive.
 * The test makes sure that the lease recovery completes and the client
 * fails if it continues to write to the file.
 * @throws Exception
 */
@Test public void testHardLeaseRecovery() throws Exception {
String filestr="/hardLeaseRecovery";
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath=new Path(filestr);
// Write some data and hflush, leaving the file open.
FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
int size=AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer,0,size);
AppendTestUtil.LOG.info("hflush");
stm.hflush();
// Stop the client's lease-renewal thread so the lease is never renewed.
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
// Shrink the hard limit so the namenode expires the lease quickly, then
// poll until recovery finalizes the file.
cluster.setLeasePeriod(LONG_LEASE_PERIOD,SHORT_LEASE_PERIOD);
LocatedBlocks locatedBlocks;
do {
Thread.sleep(SHORT_LEASE_PERIOD);
locatedBlocks=dfs.dfs.getLocatedBlocks(filestr,0L,size);
}
while (locatedBlocks.isUnderConstruction());
assertEquals(size,locatedBlocks.getFileLength());
// The original writer's lease has been recovered, so further writes
// through its stream must fail.
try {
stm.write('b');
stm.close();
fail("Writer thread should have been killed");
}
catch ( IOException e) {
e.printStackTrace();
}
AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr);
}
APIUtilityVerifierIterativeVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * This test makes the client does not renew its lease and also
 * set the soft lease expiration period to be short 1s. Thus triggering
 * soft lease expiration to happen immediately by having another client
 * trying to create the same file.
 * The test makes sure that the lease recovery completes.
 * @throws Exception
 */
@Test public void testSoftLeaseRecovery() throws Exception {
  // Typed map instead of a raw HashMap (unchecked warning).
  Map<String,String[]> u2g_map=new HashMap<String,String[]>(1);
  u2g_map.put(fakeUsername,new String[]{fakeGroup});
  DFSTestUtil.updateConfWithFakeGroupMapping(conf,u2g_map);
  // Start with the default lease periods while writing.
  cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,HdfsConstants.LEASE_HARDLIMIT_PERIOD);
  String filestr="/foo" + AppendTestUtil.nextInt();
  AppendTestUtil.LOG.info("filestr=" + filestr);
  Path filepath=new Path(filestr);
  FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));
  int size=AppendTestUtil.nextInt(FILE_SIZE);
  AppendTestUtil.LOG.info("size=" + size);
  stm.write(buffer,0,size);
  AppendTestUtil.LOG.info("hflush");
  stm.hflush();
  // Stop lease renewal, then shrink the soft limit so another client can
  // immediately preempt the lease.
  AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
  dfs.dfs.getLeaseRenewer().interruptAndJoin();
  cluster.setLeasePeriod(SHORT_LEASE_PERIOD,LONG_LEASE_PERIOD);
  {
    // A different user repeatedly tries to create the same file: the
    // attempts fail with AlreadyBeingCreated until soft-lease recovery
    // finishes, after which FileAlreadyExists is the expected outcome.
    UserGroupInformation ugi=UserGroupInformation.createUserForTesting(fakeUsername,new String[]{fakeGroup});
    FileSystem dfs2=DFSTestUtil.getFileSystemAs(ugi,conf);
    boolean done=false;
    for (int i=0; i < 10 && !done; i++) {
      AppendTestUtil.LOG.info("i=" + i);
      try {
        dfs2.create(filepath,false,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
        fail("Creation of an existing file should never succeed.");
      }
      catch ( FileAlreadyExistsException ex) {
        done=true;
      }
      catch ( AlreadyBeingCreatedException ex) {
        AppendTestUtil.LOG.info("GOOD! got " + ex.getMessage());
      }
      catch ( IOException ioe) {
        AppendTestUtil.LOG.warn("UNEXPECTED IOException",ioe);
      }
      if (!done) {
        AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
        try {
          Thread.sleep(5000);
        }
        catch ( InterruptedException e) {
        }
      }
    }
    assertTrue(done);
  }
  // Lease recovered: the file length and contents must match what the
  // original writer managed to hflush.
  AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "+ "Validating its contents now...");
  long fileSize=dfs.getFileStatus(filepath).getLen();
  assertTrue("File should be " + size + " bytes, but is actually "+ " found to be "+ fileSize+ " bytes",fileSize == size);
  AppendTestUtil.LOG.info("File size is good. " + "Now validating data and sizes from datanodes...");
  AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr);
}
Class: org.apache.hadoop.hdfs.TestLeaseRenewer
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that the lease renewer daemon starts when a file is registered,
 * exposes the expected daemon thread name, and shuts down after the last
 * tracked file is closed and the empty-grace period is forced to expire.
 */
@Test public void testThreadName() throws Exception {
  final DFSOutputStream stream = Mockito.mock(DFSOutputStream.class);
  final long inodeId = 789L;
  Assert.assertFalse("Renewer not initially running", renewer.isRunning());
  // Registering a file should spin up the renewer daemon.
  renewer.put(inodeId, stream, MOCK_DFSCLIENT);
  Assert.assertTrue("Renewer should have started running", renewer.isRunning());
  Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", renewer.getDaemonName());
  // Closing the only tracked file and expiring the empty-time makes the
  // daemon eligible to stop; poll until it does, up to a 5s deadline.
  renewer.closeFile(inodeId, MOCK_DFSCLIENT);
  renewer.setEmptyTime(Time.now());
  final long deadline = Time.now() + 5000;
  do {
    if (!renewer.isRunning()) {
      break;
    }
    Thread.sleep(50);
  } while (Time.now() < deadline);
  Assert.assertFalse(renewer.isRunning());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Creates and closes a file of certain length.
 * Calls append to allow next write() operation to add to the end of it
 * After write() invocation, calls hflush() to make sure that data sunk through
 * the pipeline and check the state of the last block's replica.
 * It supposes to be in RBW (replica-being-written) state while the append
 * stream is still open.
 * @throws IOException in case of an error
 */
@Test public void pipeline_01() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
if (LOG.isDebugEnabled()) {
LOG.debug("Running " + METHOD_NAME);
}
// Create and fully close a file so its single block is finalized.
Path filePath=new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong());
if (LOG.isDebugEnabled()) {
LOG.debug("Invoking append but doing nothing otherwise...");
}
// Reopen for append, write, and hflush; the tail block must be reopened
// for writing on every datanode in the pipeline.
FSDataOutputStream ofs=fs.append(filePath);
ofs.writeBytes("Some more stuff to write");
((DFSOutputStream)ofs.getWrappedStream()).hflush();
// Ask the NN for the located block covering the tail of the original file.
List lb=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_SIZE - 1,FILE_SIZE).getLocatedBlocks();
String bpid=cluster.getNamesystem().getBlockPoolId();
// While the appending stream is still open, every DN's replica of that
// block must exist and be in RBW state.
for ( DataNode dn : cluster.getDataNodes()) {
Replica r=DataNodeTestUtils.fetchReplicaInfo(dn,bpid,lb.get(0).getBlock().getBlockId());
assertTrue("Replica on DN " + dn + " shouldn't be null",r != null);
assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",HdfsServerConstants.ReplicaState.RBW,r.getState());
}
ofs.close();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Like the previous test but create many files. This covers bugs where
 * the quota adjustment is incorrect but it takes many files to accrue
 * a big enough accounting error to violate the quota.
 */
@Test public void testMultipleFilesSmallerThanOneBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
final int BLOCK_SIZE=6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
// Limit of 2 per content-summary pass forces the NN to yield its lock,
// which is asserted via getYieldCount() at the end.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
DFSAdmin admin=new DFSAdmin(conf);
final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf);
try {
long nsQuota=FSImageTestUtil.getNSQuota(cluster.getNameNode().getNamesystem());
assertTrue("Default namespace quota expected as long max. But the value is :" + nsQuota,nsQuota == Long.MAX_VALUE);
Path dir=new Path("/test");
boolean exceededQuota=false;
ContentSummary c;
// Space quota = 32 blocks' worth (32 * 6KiB = 192KiB). 59 files of 1KiB
// at replication 3 consume 59*1024*3 = 177KiB, leaving 15360 bytes.
final int FILE_SIZE=1024;
final int QUOTA_SIZE=32 * (int)fs.getDefaultBlockSize(dir);
assertEquals(6 * 1024,fs.getDefaultBlockSize(dir));
assertEquals(192 * 1024,QUOTA_SIZE);
assertTrue(fs.mkdirs(dir));
runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString());
for (int i=0; i < 59; i++) {
Path file=new Path("/test/test" + i);
DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file,(short)3);
}
// Plain and WebHDFS content summaries must agree, and space consumed
// must reflect actual bytes (59 files * 1KiB * 3 replicas), not full
// blocks -- the accounting bug this test hunts for.
c=fs.getContentSummary(dir);
checkContentSummary(c,webhdfs.getContentSummary(dir));
assertEquals("Invalid space consumed",59 * FILE_SIZE * 3,c.getSpaceConsumed());
// Remaining quota (192KiB - 177KiB) equals the slack of one more file:
// 3 replicas * (blockSize - fileSize). Both sides evaluate to 15360.
assertEquals("Invalid space consumed",QUOTA_SIZE - (59 * FILE_SIZE * 3),3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
// A 60th file needs a full block per replica up front, which exceeds the
// remaining quota, so the create must fail with QuotaExceededException.
try {
Path file=new Path("/test/test59");
DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file,(short)3);
}
catch ( QuotaExceededException e) {
exceededQuota=true;
}
assertTrue("Quota not exceeded",exceededQuota);
// With the limit set to 2 above, the summary traversal yielded twice.
assertEquals(2,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Test limit cases for setting space quotas: the largest representable
 * quota value (Long.MAX_VALUE - 1) must be accepted, Long.MAX_VALUE must
 * leave the corresponding quota unchanged, and anything past the long
 * range must be rejected with an IllegalArgumentException.
 */
@Test public void testMaxSpaceQuotas() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;
    final Path testFolder = new Path("/testFolder");
    assertTrue(dfs.mkdirs(testFolder));
    // Largest legal namespace quota.
    dfs.setQuota(testFolder, Long.MAX_VALUE - 1, 10);
    ContentSummary summary = dfs.getContentSummary(testFolder);
    assertTrue("Quota not set properly", summary.getQuota() == Long.MAX_VALUE - 1);
    // Largest legal space quota.
    dfs.setQuota(testFolder, 10, Long.MAX_VALUE - 1);
    summary = dfs.getContentSummary(testFolder);
    assertTrue("Quota not set properly", summary.getSpaceQuota() == Long.MAX_VALUE - 1);
    // Passing Long.MAX_VALUE must leave the respective quota at the value
    // set just above (10) rather than changing it.
    dfs.setQuota(testFolder, Long.MAX_VALUE, 10);
    summary = dfs.getContentSummary(testFolder);
    assertTrue("Quota should not have changed", summary.getQuota() == 10);
    dfs.setQuota(testFolder, 10, Long.MAX_VALUE);
    summary = dfs.getContentSummary(testFolder);
    assertTrue("Quota should not have changed", summary.getSpaceQuota() == 10);
    // Long.MAX_VALUE + 1 wraps to a negative long and must be refused.
    try {
      dfs.setQuota(testFolder, Long.MAX_VALUE + 1, 10);
      fail("Exception not thrown");
    } catch (IllegalArgumentException e) {
      // expected
    }
    try {
      dfs.setQuota(testFolder, 10, Long.MAX_VALUE + 1);
      fail("Exception not thrown");
    } catch (IllegalArgumentException e) {
      // expected
    }
  } finally {
    cluster.shutdown();
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests the "-rollingUpgrade rollback" NameNode startup option when a
 * quorum journal manager provides the shared edits: namespace changes made
 * after "-rollingUpgrade prepare" (/bar) must be discarded on rollback
 * while earlier state (/foo) survives, and every journal node's storage
 * must be rolled back as well.
 */
@Test public void testRollbackWithQJM() throws Exception {
  final Configuration conf=new HdfsConfiguration();
  MiniJournalCluster mjc=null;
  MiniDFSCluster cluster=null;
  final Path foo=new Path("/foo");
  final Path bar=new Path("/bar");
  try {
    mjc=new MiniJournalCluster.Builder(conf).numJournalNodes(NUM_JOURNAL_NODES).build();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI(JOURNAL_ID).toString());
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem dfs=cluster.getFileSystem();
    final DFSAdmin dfsadmin=new DFSAdmin(conf);
    dfs.mkdirs(foo);
    // Prepare the rolling upgrade (requires safe mode), then make a
    // change (/bar) that the rollback is expected to undo.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0,dfsadmin.run(new String[]{"-rollingUpgrade","prepare"}));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    dfs.mkdirs(bar);
    dfs.close();
    cluster.restartNameNode("-rollingUpgrade","rollback");
    // State created before "prepare" survives; state created after it is gone.
    dfs=cluster.getFileSystem();
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(dfs.exists(bar));
    // Check EVERY journal node's storage. (Bug fix: the loop previously
    // passed a hard-coded 0 instead of i, so the same journal node was
    // verified NUM_JOURNAL_NODES times and the others never.)
    for (int i=0; i < NUM_JOURNAL_NODES; i++) {
      File dir=mjc.getCurrentDir(i,JOURNAL_ID);
      checkJNStorage(dir,4,7);
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (mjc != null) {
      mjc.shutdown();
    }
  }
}
Class: org.apache.hadoop.hdfs.TestSafeMode
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that the NN initializes its under-replicated blocks queue
 * before it is ready to exit safemode (HDFS-1476)
 */
@Test(timeout=45000) public void testInitializeReplQueuesEarly() throws Exception {
LOG.info("Starting testInitializeReplQueuesEarly");
// Spread blocks across DNs instead of preferring the local node, so that
// stopping DNs leaves a predictable number of blocks unreported.
BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(),false);
cluster.startDataNodes(conf,2,true,StartupOption.REGULAR,null);
cluster.waitActive();
LOG.info("Creating files");
DFSTestUtil.createFile(fs,TEST_PATH,15 * BLOCK_SIZE,(short)1,1L);
LOG.info("Stopping all DataNodes");
List dnprops=Lists.newLinkedList();
dnprops.add(cluster.stopDataNode(0));
dnprops.add(cluster.stopDataNode(0));
dnprops.add(cluster.stopDataNode(0));
// Lower the repl-queue threshold so the queues initialize once only 1/15
// of blocks are reported -- well before safemode's own exit threshold.
cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,1f / 15f);
LOG.info("Restarting NameNode");
cluster.restartNameNode();
final NameNode nn=cluster.getNameNode();
// With no DNs reporting, the NN stays in safemode and says exactly why.
String status=nn.getNamesystem().getSafemode();
assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" + "The number of live datanodes 0 has reached the minimum number 0. "+ "Safe mode will be turned off automatically once the thresholds "+ "have been reached.",status);
assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed",NameNodeAdapter.safeModeInitializedReplQueues(nn));
LOG.info("Restarting one DataNode");
cluster.restartDataNode(dnprops.remove(0));
// Wait until the restarted DN has sent a block report for each storage.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
return getLongCounter("StorageBlockReportOps",getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
}
}
,10,10000);
// One DN's report makes some -- but not all -- of the 15 blocks safe,
// which crosses the 1/15 repl-queue threshold set above.
final int safe=NameNodeAdapter.getSafeModeSafeBlocks(nn);
assertTrue("Expected first block report to make some blocks safe.",safe > 0);
assertTrue("Did not expect first block report to make all blocks safe.",safe < 15);
assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
// Poll until the block manager counts every unreported block (15 minus
// the safe ones) as under-replicated.
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
long underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks();
while (underReplicatedBlocks != (15 - safe)) {
LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual="+ underReplicatedBlocks);
Thread.sleep(100);
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks();
}
// Bring the remaining DNs back so later tests see a healthy cluster.
cluster.restartDataNodes();
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Run various fs operations while the NN is in safe mode,
 * assert that they are either allowed or fail as expected:
 * every namespace mutation must be rejected, while read-only
 * operations (read, getAclStatus, access) must still succeed.
 */
@Test public void testOperationsWhileInSafeMode() throws IOException, InterruptedException {
final Path file1=new Path("/file1");
assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
DFSTestUtil.createFile(fs,file1,1024,(short)1,0);
assertTrue("Could not enter SM",dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
// Each runFsFun below asserts that the wrapped mutation fails in safe mode.
runFsFun("Set quota while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
((DistributedFileSystem)fs).setQuota(file1,1,1);
}
}
);
runFsFun("Set perm while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setPermission(file1,FsPermission.getDefault());
}
}
);
runFsFun("Set owner while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setOwner(file1,"user","group");
}
}
);
runFsFun("Set repl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setReplication(file1,(short)1);
}
}
);
runFsFun("Append file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
DFSTestUtil.appendFile(fs,file1,"new bytes");
}
}
);
runFsFun("Delete file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.delete(file1,false);
}
}
);
runFsFun("Rename file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.rename(file1,new Path("file2"));
}
}
);
runFsFun("Set time while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setTimes(file1,0,0);
}
}
);
// ACL and XAttr mutations are namespace changes too; all must fail.
runFsFun("modifyAclEntries while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.modifyAclEntries(file1,Lists.newArrayList());
}
}
);
runFsFun("removeAclEntries while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeAclEntries(file1,Lists.newArrayList());
}
}
);
runFsFun("removeDefaultAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeDefaultAcl(file1);
}
}
);
runFsFun("removeAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeAcl(file1);
}
}
);
runFsFun("setAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setAcl(file1,Lists.newArrayList());
}
}
);
runFsFun("setXAttr while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setXAttr(file1,"user.a1",null);
}
}
);
runFsFun("removeXAttr while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeXAttr(file1,"user.a1");
}
}
);
// Read-only operations must still succeed while in safe mode.
try {
DFSTestUtil.readFile(fs,file1);
}
catch ( IOException ioe) {
// Bug fix: message previously said "Set times failed while in SM",
// copy-pasted from the setTimes case above.
fail("Read file failed while in SM");
}
try {
fs.getAclStatus(file1);
}
catch ( IOException ioe) {
fail("getAclStatus failed while in SM");
}
// access() only checks permissions: READ is allowed for userX, while the
// WRITE check is denied with AccessControlException.
UserGroupInformation ugiX=UserGroupInformation.createRemoteUser("userX");
FileSystem myfs=ugiX.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws IOException {
return FileSystem.get(conf);
}
}
);
myfs.access(file1,FsAction.READ);
try {
myfs.access(file1,FsAction.WRITE);
fail("The access call should have failed.");
}
catch ( AccessControlException e) {
// expected: userX has no write permission on file1
}
assertFalse("Could not leave SM",dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
 * is set to a number greater than the number of live datanodes, and that it
 * leaves safemode once a datanode registers.
 */
@Test public void testDatanodeThreshold() throws IOException {
  // Restart the NN with no safemode extension and a minimum of one live DN.
  cluster.shutdown();
  Configuration conf=cluster.getConfiguration(0);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,0);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,1);
  cluster.restartNameNode();
  fs=cluster.getFileSystem();
  // With zero live datanodes the NN must stay in safemode and explain why.
  String tipMsg=cluster.getNamesystem().getSafemode();
  assertTrue("Safemode tip message doesn't look right: " + tipMsg,tipMsg.contains("The number of live datanodes 0 needs an additional " + "1 live datanodes to reach the minimum number 1.\n" + "Safe mode will be turned off automatically"));
  // Start one DN; once it registers, the threshold is met.
  cluster.startDataNodes(conf,1,true,null,null);
  try {
    // Give the DN time to register and the NN time to leave safemode.
    Thread.sleep(1000);
  }
  catch ( InterruptedException ignored) {
    // Bug fix: preserve the interrupt status instead of swallowing it.
    Thread.currentThread().interrupt();
  }
  // An empty safemode string means safemode is off.
  assertEquals("",cluster.getNamesystem().getSafemode());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test to make sure NameNode.Feature supports all previous common features.
 */
@Test public void testNameNodeFeature(){
  final LayoutFeature rollingUpgrade = NameNodeLayoutVersion.Feature.ROLLING_UPGRADE;
  final int lv = rollingUpgrade.getInfo().getLayoutVersion();
  // Any NN layout must still support the last non-reserved common feature.
  assertTrue(NameNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE, lv));
  // The first NN-specific feature sits directly after the last common one
  // (layout versions grow downward, hence the "- 1").
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1, lv);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test to make sure DataNode.Feature supports all previous common features.
 */
@Test public void testDataNodeFeature(){
  final LayoutFeature firstDnFeature = DataNodeLayoutVersion.Feature.FIRST_LAYOUT;
  final int lv = firstDnFeature.getInfo().getLayoutVersion();
  // Any DN layout must still support the last non-reserved common feature.
  assertTrue(DataNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE, lv));
  // The first DN-specific feature sits directly after the last common one
  // (layout versions grow downward, hence the "- 1").
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1, lv);
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Test that, if the remote node gets unsynchronized (eg some edits were
 * missed or the node rebooted), the client stops sending edits until
 * the next roll. Test for HDFS-3726.
 */
@Test public void testStopSendingEditsWhenOutOfSync() throws Exception {
// Make the remote journal() RPC fail once, knocking the channel out of sync.
Mockito.doThrow(new IOException("injected error")).when(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
try {
ch.sendEdits(1L,1L,1,FAKE_DATA).get();
fail("Injected JOOSE did not cause sendEdits() to throw");
}
catch ( ExecutionException ee) {
GenericTestUtils.assertExceptionContains("injected",ee);
}
Mockito.verify(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
assertTrue(ch.isOutOfSync());
// While out of sync, further sends must fail fast without any RPC...
try {
ch.sendEdits(1L,2L,1,FAKE_DATA).get();
fail("sendEdits() should throw until next roll");
}
catch ( ExecutionException ee) {
GenericTestUtils.assertExceptionContains("disabled until next roll",ee.getCause());
}
// ...verified here: no journal() call for txid 2, only a heartbeat.
Mockito.verify(mockProxy,Mockito.never()).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(2L),Mockito.eq(1),Mockito.same(FAKE_DATA));
Mockito.verify(mockProxy).heartbeat(Mockito.any());
// Rolling to a new segment clears the out-of-sync flag and re-enables sends.
ch.startLogSegment(3L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
assertFalse(ch.isOutOfSync());
ch.sendEdits(3L,3L,1,FAKE_DATA).get();
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that, once the queue eclipses the configure size limit,
 * calls to journal more data are rejected.
 */
@Test public void testQueueLimiting() throws Exception {
// Stall the first journal() RPC so every subsequent send stays queued.
DelayAnswer delayer=new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
// Fill the queue exactly to its configured byte limit.
int numToQueue=LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
for (int i=1; i <= numToQueue; i++) {
ch.sendEdits(1L,(long)i,1,FAKE_DATA);
}
assertEquals(LIMIT_QUEUE_SIZE_BYTES,ch.getQueuedEditsSize());
// One more send must be rejected with LoggerTooFarBehindException.
try {
ch.sendEdits(1L,numToQueue + 1,1,FAKE_DATA).get(1,TimeUnit.SECONDS);
fail("Did not fail to queue more calls after queue was full");
}
catch ( ExecutionException ee) {
if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
throw ee;
}
}
// Unblock the stalled RPC and wait for the queue to drain completely.
delayer.proceed();
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
return ch.getQueuedEditsSize() == 0;
}
}
,10,1000);
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Regression test for HDFS-3891: selectInputStreams should throw
 * an exception when a majority of journalnodes have crashed.
 */
@Test public void testSelectInputStreamsMajorityDown() throws Exception {
  // Kill every journal node, then ask the quorum manager for edit streams.
  cluster.shutdown();
  final List collected = Lists.newArrayList();
  try {
    qjm.selectInputStreams(collected, 0, false);
    fail("Did not throw IOE");
  } catch (QuorumException ioe) {
    // The call must fail loudly rather than hand back partial results.
    GenericTestUtils.assertExceptionContains("Got too many exceptions", ioe);
    assertTrue(collected.isEmpty());
  }
}
APIUtilityVerifierUtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test the case where one of the loggers misses a finalizeLogSegment()
 * call, and then misses the next startLogSegment() call before coming
 * back to life.
 * Previously, this caused it to keep on writing to the old log segment,
 * such that one logger had eg edits_1-10 while the others had edits_1-5 and
 * edits_6-10. This caused recovery to fail in certain cases.
 */
@Test public void testMissFinalizeAndNextStart() throws Exception {
// Logger 0 misses both the finalize of segment [1,3] and the start of
// segment 4; logger 1 fails once txn 4 is written to it.
futureThrows(new IOException("injected")).when(spies.get(0)).finalizeLogSegment(Mockito.eq(1L),Mockito.eq(3L));
futureThrows(new IOException("injected")).when(spies.get(0)).startLogSegment(Mockito.eq(4L),Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
failLoggerAtTxn(spies.get(1),4L);
writeSegment(cluster,qjm,1,3,true);
// With loggers 0 and 1 both unable to take txn 4, no quorum exists and
// the write must fail with "Writer out of sync".
EditLogOutputStream stm=qjm.startLogSegment(4,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
try {
writeTxns(stm,4,1);
fail("Did not fail to write");
}
catch ( QuorumException qe) {
GenericTestUtils.assertExceptionContains("Writer out of sync",qe);
}
finally {
stm.abort();
qjm.close();
}
// Stop the one fully-healthy journal node and recover from the two that
// lagged: recovery must settle on txn 3, the end of the last segment
// that reached a quorum.
cluster.getJournalNode(2).stopAndJoin(0);
qjm=createSpyingQJM();
long recovered=QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertEquals(3L,recovered);
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that the JournalNode performs correctly as a Paxos
 * Acceptor process: recovery calls require an established epoch,
 * prepareRecovery reports previously written/accepted segment state,
 * and both prepare and accept reject callers with a stale epoch.
 */
@Test(timeout=100000) public void testAcceptRecoveryBehavior() throws Exception {
  // Paxos calls before any epoch is established must be rejected.
  try {
    ch.prepareRecovery(1L).get();
    fail("Did not throw IllegalState when trying to run paxos without an epoch");
  }
  catch ( ExecutionException ise) {
    GenericTestUtils.assertExceptionContains("bad epoch",ise);
  }
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  // Nothing has been written yet, so the prepare response is empty.
  PrepareRecoveryResponseProto prep=ch.prepareRecovery(1L).get();
  System.err.println("Prep: " + prep);
  assertFalse(prep.hasAcceptedInEpoch());
  assertFalse(prep.hasSegmentState());
  ch.startLogSegment(1L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L,1L,1,QJMTestUtil.createTxnData(1,1)).get();
  // After txn 1 is written, prepare reports segment state but still no
  // previously accepted recovery value.
  prep=ch.prepareRecovery(1L).get();
  System.err.println("Prep: " + prep);
  assertFalse(prep.hasAcceptedInEpoch());
  assertTrue(prep.hasSegmentState());
  ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
  // Move to epoch 2. Bug fix: wait for the RPC to complete -- the original
  // discarded the returned future, so a failed newEpoch went unnoticed.
  ch.newEpoch(2).get();
  ch.setEpoch(2);
  // The acceptance made in epoch 1 must now be reported back.
  prep=ch.prepareRecovery(1L).get();
  assertEquals(1L,prep.getAcceptedInEpoch());
  assertEquals(1L,prep.getSegmentState().getEndTxId());
  // Both prepare and accept from the stale epoch 1 must be rejected.
  ch.setEpoch(1);
  try {
    ch.prepareRecovery(1L).get();
    fail("prepare from earlier epoch not rejected");
  }
  catch ( ExecutionException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
  }
  try {
    ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
    fail("accept from earlier epoch not rejected");
  }
  catch ( ExecutionException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
  }
}
APIUtilityVerifierIterativeVerifierBranchVerifierUtilityVerifierInternalCallVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
@Test public void testBlockTokenRpcLeak() throws Exception {
  Configuration conf=new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(conf);
  // Counting open fds requires FD_DIR (/proc-style listing); skip otherwise.
  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm=new BlockTokenSecretManager(blockKeyUpdateInterval,blockTokenLifetime,0,"fake-pool",null);
  Token token=sm.generateToken(block3,EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
  final Server server=createMockDatanode(sm,token,conf);
  server.start();
  final InetSocketAddress addr=NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId=DFSTestUtil.getLocalDatanodeID(addr.getPort());
  ExtendedBlock b=new ExtendedBlock("fake-pool",new Block(12345L));
  LocatedBlock fakeBlock=new LocatedBlock(b,new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);
  // A proxy to an unreachable address; its socket must not leak either.
  ClientDatanodeProtocol proxyToNoWhere=RPC.getProxy(ClientDatanodeProtocol.class,ClientDatanodeProtocol.versionID,new InetSocketAddress("1.1.1.1",1),UserGroupInformation.createRemoteUser("junk"),conf,NetUtils.getDefaultSocketFactory(conf));
  int fdsAtStart=countOpenFileDescriptors();
  try {
    long endTime=Time.now() + 3000;
    // Hammer proxy creation for ~3s; the fd count must stay roughly flat.
    while (Time.now() < endTime) {
      ClientDatanodeProtocol proxy=DFSUtil.createClientDatanodeProtocolProxy(fakeDnId,conf,1000,false,fakeBlock);
      try {
        assertEquals(block3.getBlockId(),proxy.getReplicaVisibleLength(block3));
      }
      finally {
        // Bug fix: always release the proxy, even when the assertion
        // throws. The original stopped it only on the success path, and
        // its "if (proxy != null)" guard was dead code since proxy had
        // just been dereferenced.
        RPC.stopProxy(proxy);
      }
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    int fdsAtEnd=countOpenFileDescriptors();
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  }
  finally {
    server.stop();
  }
  RPC.stopProxy(proxyToNoWhere);
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test parse method in Balancer#Cli class with threshold value out of
 * boundaries: values at/below 0 and above 100 must be rejected with an
 * IllegalArgumentException carrying an exact message.
 */
@Test(timeout=100000) public void testBalancerCliParseWithThresholdOutOfBoundaries(){
  final String reason="IllegalArgumentException is expected when threshold value" + " is out of boundary.";
  // Below the valid range.
  expectThresholdRejected("0","Number out of range: threshold = 0.0",reason);
  // Above the valid range.
  expectThresholdRejected("101","Number out of range: threshold = 101.0",reason);
}

/**
 * Runs Balancer.Cli.parse with the given -threshold value and asserts that
 * it fails with an IllegalArgumentException whose message matches exactly.
 * (Extracted to remove the duplicated try/catch in the original.)
 */
private static void expectThresholdRejected(String threshold,String expectedMessage,String reason){
  try {
    Balancer.Cli.parse(new String[]{"-threshold",threshold});
    fail(reason);
  }
  catch ( IllegalArgumentException e) {
    assertEquals(expectedMessage,e.getMessage());
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test a cluster with even distribution, then a new empty node is added to
 * the cluster. Test start a cluster with specified number of nodes, and fills
 * it to be 30% full (with a single file replicated identically to all
 * datanodes); It then adds one new empty node and starts balancing.
 */
@Test(timeout=60000) public void testBalancerWithHANameNodes() throws Exception {
Configuration conf=new HdfsConfiguration();
TestBalancer.initConf(conf);
long newNodeCapacity=TestBalancer.CAPACITY;
String newNodeRack=TestBalancer.RACK2;
String[] racks=new String[]{TestBalancer.RACK0,TestBalancer.RACK1};
long[] capacities=new long[]{TestBalancer.CAPACITY,TestBalancer.CAPACITY};
assertEquals(capacities.length,racks.length);
int numOfDatanodes=capacities.length;
NNConf nn1Conf=new MiniDFSNNTopology.NNConf("nn1");
nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
// Build the cluster from a copy so that setFailoverConfigurations below
// can rewrite 'conf' for client-side HA without affecting the cluster.
Configuration copiedConf=new Configuration(conf);
cluster=new MiniDFSCluster.Builder(copiedConf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities).build();
HATestUtil.setFailoverConfigurations(cluster,conf);
try {
cluster.waitActive();
cluster.transitionToActive(1);
// Brief pause after the failover transition before creating the client
// proxy. NOTE(review): a fixed sleep is potentially flaky -- confirm
// whether a positive "wait for active" condition could replace it.
Thread.sleep(500);
client=NameNodeProxies.createProxy(conf,FileSystem.getDefaultUri(conf),ClientProtocol.class).getProxy();
// Fill the cluster to 30% with one file replicated to every datanode.
long totalCapacity=TestBalancer.sum(capacities);
long totalUsedSpace=totalCapacity * 3 / 10;
TestBalancer.createFile(cluster,TestBalancer.filePath,totalUsedSpace / numOfDatanodes,(short)numOfDatanodes,1);
// Add one empty datanode on a new rack, then run the balancer.
cluster.startDataNodes(conf,1,true,null,new String[]{newNodeRack},new long[]{newNodeCapacity});
totalCapacity+=newNodeCapacity;
TestBalancer.waitForHeartBeat(totalUsedSpace,totalCapacity,client,cluster);
// In this HA setup there must be exactly one logical nameservice URI.
Collection namenodes=DFSUtil.getNsServiceRpcUris(conf);
assertEquals(1,namenodes.size());
assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
final int r=Balancer.run(namenodes,Balancer.Parameters.DEFAULT,conf);
assertEquals(ExitStatus.SUCCESS.getExitCode(),r);
// Balancer exited successfully; wait until the cluster is actually even.
TestBalancer.waitForBalancer(totalUsedSpace,totalCapacity,client,cluster,Balancer.Parameters.DEFAULT);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that adding a datanode storage to a fresh BlockInfo succeeds
 * and that the storage then occupies the first storage slot.
 */
@Test public void testAddStorage() throws Exception {
  final BlockInfo block = new BlockInfo(3);
  final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
  // First-time addition must report success and land in slot 0.
  Assert.assertTrue(block.addStorage(storage));
  Assert.assertEquals(storage, block.getStorageInfo(0));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Exercises the datanode storage's intrusive block list: builds a list of
 * MAX_BLOCKS blocks, verifies its length via the block iterator, then
 * checks moveBlockToHead for every element in order, for the head itself
 * (a no-op), and for randomly chosen elements.
 */
@Test public void testBlockListMoveToHead() throws Exception {
LOG.info("BlockInfo moveToHead tests...");
final int MAX_BLOCKS=10;
DatanodeStorageInfo dd=DFSTestUtil.createDatanodeStorageInfo("s1","1.1.1.1");
ArrayList blockList=new ArrayList(MAX_BLOCKS);
ArrayList blockInfoList=new ArrayList();
int headIndex;
int curIndex;
LOG.info("Building block list...");
for (int i=0; i < MAX_BLOCKS; i++) {
blockList.add(new Block(i,0,GenerationStamp.LAST_RESERVED_STAMP));
blockInfoList.add(new BlockInfo(blockList.get(i),3));
dd.addBlock(blockInfoList.get(i));
// Each block was added with dd as its only storage, so index 0.
assertEquals("Find datanode should be 0",0,blockInfoList.get(i).findStorageInfo(dd));
}
// The storage's own count and an explicit iterator walk must agree.
LOG.info("Checking list length...");
assertEquals("Length should be MAX_BLOCK",MAX_BLOCKS,dd.numBlocks());
Iterator it=dd.getBlockIterator();
int len=0;
while (it.hasNext()) {
it.next();
len++;
}
assertEquals("There should be MAX_BLOCK blockInfo's",MAX_BLOCKS,len);
headIndex=dd.getBlockListHeadForTesting().findStorageInfo(dd);
LOG.info("Moving each block to the head of the list...");
for (int i=0; i < MAX_BLOCKS; i++) {
curIndex=blockInfoList.get(i).findStorageInfo(dd);
headIndex=dd.moveBlockToHead(blockInfoList.get(i),curIndex,headIndex);
assertEquals("Block should be at the head of the list now.",blockInfoList.get(i),dd.getBlockListHeadForTesting());
}
// Moving the current head to the head must be a no-op.
LOG.info("Moving head to the head...");
BlockInfo temp=dd.getBlockListHeadForTesting();
curIndex=0;
headIndex=0;
dd.moveBlockToHead(temp,curIndex,headIndex);
// Bug fix: assertion message previously read "shopuld".
assertEquals("Moving head to the head of the list should not change the list",temp,dd.getBlockListHeadForTesting());
// After moving 0..MAX_BLOCKS-1 to the head in order, the list reads in
// reverse insertion order: MAX_BLOCKS-1 down to 0.
LOG.info("Checking elements of the list...");
temp=dd.getBlockListHeadForTesting();
assertNotNull("Head should not be null",temp);
int c=MAX_BLOCKS - 1;
while (temp != null) {
assertEquals("Expected element is not on the list",blockInfoList.get(c--),temp);
temp=temp.getNext(0);
}
LOG.info("Moving random blocks to the head of the list...");
headIndex=dd.getBlockListHeadForTesting().findStorageInfo(dd);
Random rand=new Random();
for (int i=0; i < MAX_BLOCKS; i++) {
int j=rand.nextInt(MAX_BLOCKS);
curIndex=blockInfoList.get(j).findStorageInfo(dd);
headIndex=dd.moveBlockToHead(blockInfoList.get(j),curIndex,headIndex);
assertEquals("Block should be at the head of the list now.",blockInfoList.get(j),dd.getBlockListHeadForTesting());
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises a single per-datanode CachedBlock list: initial emptiness,
 * add / addFirst / remove preserving the expected iteration order, and
 * clear() emptying the list -- while the sibling pending lists stay empty.
 */
@Test(timeout=60000) public void testSingleList(){
DatanodeDescriptor dn=new DatanodeDescriptor(new DatanodeID("127.0.0.1","localhost","abcd",5000,5001,5002,5003));
CachedBlock[] blocks=new CachedBlock[]{new CachedBlock(0L,(short)1,true),new CachedBlock(1L,(short)1,true),new CachedBlock(2L,(short)1,true)};
// All three lists start out empty.
Assert.assertTrue("expected pending cached list to start off empty.",!dn.getPendingCached().iterator().hasNext());
Assert.assertTrue("expected cached list to start off empty.",!dn.getCached().iterator().hasNext());
Assert.assertTrue("expected pending uncached list to start off empty.",!dn.getPendingUncached().iterator().hasNext());
// Adding to the cached list must not disturb the pending lists.
Assert.assertTrue(dn.getCached().add(blocks[0]));
Assert.assertTrue("expected pending cached list to still be empty.",!dn.getPendingCached().iterator().hasNext());
Assert.assertEquals("failed to insert blocks[0]",blocks[0],dn.getCached().iterator().next());
Assert.assertTrue("expected pending uncached list to still be empty.",!dn.getPendingUncached().iterator().hasNext());
// add() appends: order is [0, 1].
Assert.assertTrue(dn.getCached().add(blocks[1]));
Iterator iter=dn.getCached().iterator();
Assert.assertEquals(blocks[0],iter.next());
Assert.assertEquals(blocks[1],iter.next());
Assert.assertTrue(!iter.hasNext());
// addFirst() prepends: order is [2, 0, 1].
Assert.assertTrue(dn.getCached().addFirst(blocks[2]));
iter=dn.getCached().iterator();
Assert.assertEquals(blocks[2],iter.next());
Assert.assertEquals(blocks[0],iter.next());
Assert.assertEquals(blocks[1],iter.next());
Assert.assertTrue(!iter.hasNext());
// Removing the middle element leaves [2, 1].
Assert.assertTrue(dn.getCached().remove(blocks[0]));
iter=dn.getCached().iterator();
Assert.assertEquals(blocks[2],iter.next());
Assert.assertEquals(blocks[1],iter.next());
Assert.assertTrue(!iter.hasNext());
dn.getCached().clear();
// Bug fix: the original asserted on getPendingCached() here (trivially
// empty throughout), but the message and the clear() above both target
// the cached list.
Assert.assertTrue("expected cached list to be empty after clear.",!dn.getCached().iterator().hasNext());
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Exercises CorruptReplicasMap: argument validation in
 * getCorruptReplicaBlockIds, size bookkeeping as corrupt replicas are
 * added and removed (a block reported by two datanodes counts once), and
 * paged retrieval of corrupt block ids.
 */
@Test public void testCorruptReplicaInfo() throws IOException, InterruptedException {
CorruptReplicasMap crm=new CorruptReplicasMap();
assertEquals("Number of corrupt blocks must initially be 0",0,crm.size());
// getCorruptReplicaBlockIds: n outside [0, 100] yields null; n == 0
// yields an empty (non-null) array.
assertNull("Param n cannot be less than 0",crm.getCorruptReplicaBlockIds(-1,null));
assertNull("Param n cannot be greater than 100",crm.getCorruptReplicaBlockIds(101,null));
long[] l=crm.getCorruptReplicaBlockIds(0,null);
assertNotNull("n = 0 must return non-null",l);
assertEquals("n = 0 must return an empty list",0,l.length);
int NUM_BLOCK_IDS=140;
List block_ids=new LinkedList();
for (int i=0; i < NUM_BLOCK_IDS; i++) {
block_ids.add((long)i);
}
DatanodeDescriptor dn1=DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeDescriptor dn2=DFSTestUtil.getLocalDatanodeDescriptor();
// size() counts distinct corrupt blocks, not (block, datanode) pairs:
// block 1 reported by both dn1 and dn2 still counts once.
addToCorruptReplicasMap(crm,getBlock(0),dn1);
assertEquals("Number of corrupt blocks not returning correctly",1,crm.size());
addToCorruptReplicasMap(crm,getBlock(1),dn1);
assertEquals("Number of corrupt blocks not returning correctly",2,crm.size());
addToCorruptReplicasMap(crm,getBlock(1),dn2);
assertEquals("Number of corrupt blocks not returning correctly",2,crm.size());
crm.removeFromCorruptReplicasMap(getBlock(1));
assertEquals("Number of corrupt blocks not returning correctly",1,crm.size());
crm.removeFromCorruptReplicasMap(getBlock(0));
assertEquals("Number of corrupt blocks not returning correctly",0,crm.size());
// Bulk-add 140 corrupt blocks and check paged retrieval of their ids.
for ( Long block_id : block_ids) {
addToCorruptReplicasMap(crm,getBlock(block_id),dn1);
}
assertEquals("Number of corrupt blocks not returning correctly",NUM_BLOCK_IDS,crm.size());
assertTrue("First five block ids not returned correctly ",Arrays.equals(new long[]{0,1,2,3,4},crm.getCorruptReplicaBlockIds(5,null)));
LOG.info(crm.getCorruptReplicaBlockIds(10,7L));
LOG.info(block_ids.subList(7,18));
// Passing a starting block id (7) returns the next 10 ids after it: 8..17.
assertTrue("10 blocks after 7 not returned correctly ",Arrays.equals(new long[]{8,9,10,11,12,13,14,15,16,17},crm.getCorruptReplicaBlockIds(10,7L)));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
* Test when a block's replica is removed from RBW folder in one of the
* datanode, namenode should ask to invalidate that corrupted block and
* schedule replication for one more replica for that under replicated block.
*/
@Test(timeout=600000) public void testBlockInvalidationWhenRBWReplicaMissedInDN() throws IOException, InterruptedException {
// Deleting block files out from under a running datanode is skipped on Windows.
assumeTrue(!Path.WINDOWS);
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,2);
// Aggressive block-report / directory-scan / heartbeat intervals so the
// namenode notices the missing replica quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,300);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FSDataOutputStream out=null;
try {
final FSNamesystem namesystem=cluster.getNamesystem();
FileSystem fs=cluster.getFileSystem();
Path testPath=new Path("/tmp/TestRBWBlockInvalidation","foo1");
out=fs.create(testPath,(short)2);
out.writeBytes("HDFS-3157: " + testPath);
// hsync() so the block exists (as RBW) on the datanodes while still open.
out.hsync();
// Extra datanode gives the namenode somewhere to re-replicate to.
cluster.startDataNodes(conf,1,true,null,null,null);
String bpid=namesystem.getBlockPoolId();
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,testPath);
Block block=blk.getLocalBlock();
DataNode dn=cluster.getDataNodes().get(0);
// Simulate replica loss: remove both the block file and its meta file
// from the first datanode's RBW folder.
File blockFile=DataNodeTestUtils.getBlockFile(dn,bpid,block);
File metaFile=DataNodeTestUtils.getMetaFile(dn,bpid,block);
assertTrue("Could not delete the block file from the RBW folder",blockFile.delete());
assertTrue("Could not delete the block meta file from the RBW folder",metaFile.delete());
out.close();
int liveReplicas=0;
// Poll until the namenode observes the corruption (live replicas drop
// below the replication factor). Bounded by the @Test timeout.
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) < 2) {
LOG.info("Live Replicas after corruption: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be less than 2 replicas in the " + "liveReplicasMap",1,liveReplicas);
// Poll until re-replication restores the second live replica.
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) > 1) {
LOG.info("Live Replicas after Rereplication: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be two live replicas",2,liveReplicas);
// Finally the corrupt replica should be invalidated and drop to zero.
while (true) {
Thread.sleep(100);
if (countReplicas(namesystem,blk).corruptReplicas() == 0) {
LOG.info("Corrupt Replicas becomes 0");
break;
}
}
}
finally {
// NOTE(review): out.close() may run a second time here after the close in
// the try body — presumably a no-op on an already-closed stream; verify.
if (out != null) {
out.close();
}
cluster.shutdown();
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is a node outside of the file system.
 * So the 1st replica can be placed on any node,
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica.
 * @throws Exception
 */
@Test
public void testChooseTarget5() throws Exception {
  // Writer that is not itself a datanode of the cluster.
  DatanodeDescriptor writerDesc = DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, writerDesc);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, writerDesc);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, writerDesc);
  assertEquals(2, targets.length);
  // First two replicas must span racks.
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, writerDesc);
  assertEquals(3, targets.length);
  // 3rd replica shares the 2nd replica's rack, distinct from the 1st's.
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget3() throws Exception {
  // Leave dataNodes[0] with too little remaining space to be chosen.
  updateHeartbeatWithUsage(dataNodes[0], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[1], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[1], targets[0]);
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameRack(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));
  // Restore dataNodes[0]'s capacity for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale,
 * and when the number of replicas is less or equal to 3, all the healthy
 * datanodes should be returned by the chooseTarget method. When the number
 * of replicas is 4, a stale node should be included.
 * @throws Exception
 */
@Test
public void testChooseTargetWithHalfStaleNodes() throws Exception {
  // Mark the first three datanodes stale by backdating their heartbeats.
  for (int i = 0; i < 3; i++) {
    dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
  }
  namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .getHeartbeatManager().heartbeatCheck();
  DatanodeStorageInfo[] targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  // Stale nodes (indices 0..2) must be avoided while healthy nodes suffice.
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
  assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));
  // With 4 replicas requested the three healthy nodes are all used, so a
  // stale node must fill the fourth slot.
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
  // Reset all heartbeats so later tests see a fully healthy cluster.
  for (int i = 0; i < dataNodes.length; i++) {
    dataNodes[i].setLastUpdate(Time.now());
  }
  namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .getHeartbeatManager().heartbeatCheck();
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3,
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica.
 * @throws Exception
 */
@Test
public void testChoooseTarget4() throws Exception {
  // Exhaust the space on both rack-1 nodes so neither can be chosen.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  // No target may land on the writer's (disqualified) rack.
  for (int i = 0; i < 3; i++) {
    assertFalse(isOnSameRack(targets[i], dataNodes[0]));
  }
  assertTrue(isOnSameRack(targets[0], targets[1]) || isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
  // Restore the rack-1 nodes' capacity for subsequent tests.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests whether the value returned by
 * DFSUtil.getReplWorkMultiplier() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when a non-positive value is retrieved.
 */
@Test
public void testGetReplWorkMultiplier() {
  Configuration conf = new Configuration();
  int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  // The default multiplier must be positive.
  assertTrue(blocksReplWorkMultiplier > 0);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, "3");
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  assertEquals(3, blocksReplWorkMultiplier);
  // A non-positive configured value must be rejected.
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, "-1");
  exception.expect(IllegalArgumentException.class);
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node by same rack as
 * the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test
public void testRereplicate1() throws Exception {
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  // One extra replica: it goes off the existing replica's rack.
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  // Two extra replicas: the first fills the existing rack, the second spans.
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on
 * different rack and third should be placed on different node
 * of rack chosen for 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test
public void testChooseTarget1() throws Exception {
  // Load dataNodes[0] with 4 xceivers; it is still eligible as the writer.
  updateHeartbeatWithUsage(dataNodes[0], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 4, 0);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
  // Reset dataNodes[0]'s xceiver count for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica, and the rest
 * should be placed on a third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget2() throws Exception {
  Set excludedNodes;
  DatanodeStorageInfo[] targets;
  List chosenNodes = new ArrayList();
  excludedNodes = new HashSet();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(0, chosenNodes, excludedNodes);
  assertEquals(0, targets.length);
  // Each scenario starts from a clean chosen/excluded state with
  // dataNodes[1] excluded.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(2, chosenNodes, excludedNodes);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(3, chosenNodes, excludedNodes);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(4, chosenNodes, excludedNodes);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameRack(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));
  // With returnChosenNodes=true the result includes the pre-chosen storage.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
      excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
  // storages[2] must appear somewhere in the returned targets.
  int i = 0;
  for (; i < targets.length && !storages[2].equals(targets[i]); i++) ;
  assertTrue(i < targets.length);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[2] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate3() throws Exception {
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[2]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // Writer dataNodes[0]: the new replica lands on the writer's rack.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], dataNodes[2]));
  // Writer dataNodes[2]: the new replica lands on that writer's rack instead.
  targets = chooseTarget(1, dataNodes[2], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[2]));
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, dataNodes[2], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[2]));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, it tries to choose more targets than available nodes and
 * check the result.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // Disqualify two nodes so only NUM_OF_DATANODES - 2 remain eligible.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  // Capture log output so we can verify the shortfall warning.
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  assertEquals(NUM_OF_DATANODES - 2, targets.length);
  final List log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.isEmpty());
  // The placement policy must have warned about the two missing targets.
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);
  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));
  // Restore the two nodes' capacity for subsequent tests.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests whether the default value returned by
 * DFSUtil.getInvalidateWorkPctPerIteration() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when 0.0f is retrieved.
 */
@Test
public void testGetInvalidateWorkPctPerIteration() {
  Configuration conf = new Configuration();
  float blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  // The default percentage must be positive.
  assertTrue(blocksInvalidateWorkPct > 0);
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.5f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(0.5f, blocksInvalidateWorkPct, blocksInvalidateWorkPct * 1e-7);
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.0f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(1.0f, blocksInvalidateWorkPct, blocksInvalidateWorkPct * 1e-7);
  // 0.0f is out of the accepted (0, 1] range and must be rejected.
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.0f");
  exception.expect(IllegalArgumentException.class);
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack than rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate2() throws Exception {
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  // Rack 1 already holds two replicas, so new targets must avoid it.
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[1], dataNodes[0]));
}
APIUtilityVerifierIterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests that nodes with declared host dependencies are never chosen together:
 * picking one target must exclude all of its dependent hosts.
 */
@Test
public void testChooseTargetWithDependencies() throws Exception {
  // Swap the default topology for the dependency-aware node set.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    DatanodeDescriptor node = dataNodesInMoreTargetsCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  Host2NodesMap host2DatanodeMap = namenode.getNamesystem().getBlockManager()
      .getDatanodeManager().getHost2DatanodeMap();
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    cluster.add(dataNodesForDependencies[i]);
    host2DatanodeMap.add(dataNodesForDependencies[i]);
  }
  // Declare mutual dependencies: 1<->2 and 3<->4.
  dataNodesForDependencies[1].addDependentHostName(dataNodesForDependencies[2].getHostName());
  dataNodesForDependencies[2].addDependentHostName(dataNodesForDependencies[1].getHostName());
  dataNodesForDependencies[3].addDependentHostName(dataNodesForDependencies[4].getHostName());
  dataNodesForDependencies[4].addDependentHostName(dataNodesForDependencies[3].getHostName());
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    updateHeartbeatWithUsage(dataNodesForDependencies[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  List chosenNodes = new ArrayList();
  DatanodeStorageInfo[] targets;
  Set excludedNodes = new HashSet();
  excludedNodes.add(dataNodesForDependencies[5]);
  // Ask for 3 targets; only 2 can be satisfied because choosing a node
  // excludes its dependent partner.
  targets = chooseTarget(3, dataNodesForDependencies[1], chosenNodes, excludedNodes);
  assertEquals(2, targets.length);
  assertEquals(storagesForDependencies[1], targets[0]);
  assertTrue(targets[1].equals(storagesForDependencies[3])
      || targets[1].equals(storagesForDependencies[4]));
  // Every node ends up excluded: chosen ones plus their dependents.
  assertEquals(NUM_OF_DATANODES_FOR_DEPENDENCIES, excludedNodes.size());
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
  }
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[3] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate3() throws Exception {
  setupDataNodeCapacity();
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[3]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // Writer dataNodes[0]: new replica on the writer's rack, not rack of dn3.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(dataNodes[3], targets[0]));
  // Writer dataNodes[3]: same rack but a different node group than dn3.
  targets = chooseTarget(1, dataNodes[3], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[3], targets[0]));
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  targets = chooseTarget(2, dataNodes[3], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack of rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate2() throws Exception {
  setupDataNodeCapacity();
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  // NOTE(review): the && only forbids BOTH targets sharing rack 1 — it would
  // pass if exactly one does; confirm whether || was intended.
  assertFalse(isOnSameRack(dataNodes[0], targets[0]) && isOnSameRack(dataNodes[0], targets[1]));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node and nodegroup by same rack as
 * the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test
public void testRereplicate1() throws Exception {
  setupDataNodeCapacity();
  List chosenNodes = new ArrayList();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  // Same rack as the existing replica, but a different node group.
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test replica placement policy in case of boundary topology.
 * Rack 2 has only 1 node group &amp; can't be placed with two replicas
 * The 1st replica will be placed on writer.
 * The 2nd replica should be placed on a different rack
 * The 3rd replica should be placed on the same rack with writer, but on a
 * different node group.
 */
@Test
public void testChooseTargetsOnBoundaryTopology() throws Exception {
  // Replace the default nodes with the boundary-case topology.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    cluster.add(dataNodesInBoundaryCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    // NOTE(review): dataNodes[0] is updated on every iteration here —
    // presumably only the boundary-case node update matters; verify.
    updateHeartbeatWithUsage(dataNodes[0], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
  assertEquals(3, targets.length);
  // All three replicas must land in distinct node groups.
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is a node outside of the file system.
 * So the 1st replica can be placed on any node,
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica.
 * @throws Exception
 */
@Test
public void testChooseTarget5() throws Exception {
  setupDataNodeCapacity();
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, NODE);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, NODE);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, NODE);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, NODE);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  // Node-group-aware policy: no two replicas share a node group.
  verifyNoTwoTargetsOnSameNodeGroup(targets);
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica but in different
 * node group, and the rest should be placed on a third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget2() throws Exception {
  DatanodeStorageInfo[] targets;
  BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault) replicator;
  List chosenNodes = new ArrayList();
  Set excludedNodes = new HashSet();
  excludedNodes.add(dataNodes[1]);
  targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false,
      excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  // No later replica may share the first replica's node group.
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameNodeGroup(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));
  // With returnChosenNodes=true the result includes the pre-chosen storage.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
      excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
  // storages[2] must appear somewhere in the returned targets.
  int i = 0;
  for (; i < targets.length && !storages[2].equals(targets[i]); i++) ;
  assertTrue(i < targets.length);
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica but in different nodegroup,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget3() throws Exception {
  // Leave dataNodes[0] with too little remaining space to be chosen.
  updateHeartbeatWithUsage(dataNodes[0], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[1], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[1], targets[2]) || isOnSameRack(targets[2], targets[3]));
  // Restore dataNodes[0]'s capacity for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica, but
 * in different node group.
 * @throws Exception
 */
@Test
public void testChooseTarget4() throws Exception {
  // Exhaust the space on all three rack-1 nodes so none can be chosen.
  for (int i = 0; i < 3; i++) {
    updateHeartbeatWithUsage(dataNodes[i], 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  // No target may land on the writer's (disqualified) rack.
  for (int i = 0; i < 3; i++) {
    assertFalse(isOnSameRack(dataNodes[0], targets[i]));
  }
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[0], targets[1]) || isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test replica placement policy in case of targets more than number of
 * NodeGroups.
 * The 12-nodes cluster only has 6 NodeGroups, but in some cases, like:
 * placing submitted job file, there is requirement to choose more (10)
 * targets for placing replica. We should test it can return 6 targets.
 */
@Test
public void testChooseMoreTargetsThanNodeGroups() throws Exception {
  // Replace the default and boundary-case nodes with the 12-node,
  // 6-node-group topology.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    DatanodeDescriptor node = dataNodesInBoundaryCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    cluster.add(dataNodesInMoreTargetsCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(3, dataNodesInMoreTargetsCase[0]);
  assertEquals(3, targets.length);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
  // Requesting 10 targets can only yield one per node group, i.e. 6.
  targets = chooseTarget(10, dataNodesInMoreTargetsCase[0]);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
  assertEquals(6, targets.length);
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on
 * different rack and third should be placed on different node (and node group)
 * of rack chosen for 2nd node.
 * The only excpetion is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test public void testChooseTarget1() throws Exception {
  // Give the writer node ample space but a non-zero xceiver count (4).
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,4,0);
  DatanodeStorageInfo[] targets;
  // assertEquals arguments put in (expected, actual) order; the original
  // reversed them, producing misleading failure messages.
  targets=chooseTarget(0);
  assertEquals(0,targets.length);
  // 1 replica: the writer's own storage is chosen first.
  targets=chooseTarget(1);
  assertEquals(1,targets.length);
  assertEquals(storages[0],targets[0]);
  // 2 replicas: writer first, second on a different rack.
  targets=chooseTarget(2);
  assertEquals(2,targets.length);
  assertEquals(storages[0],targets[0]);
  assertFalse(isOnSameRack(targets[0],targets[1]));
  // 3 replicas: 2nd and 3rd share a rack but not a node group.
  targets=chooseTarget(3);
  assertEquals(3,targets.length);
  assertEquals(storages[0],targets[0]);
  assertFalse(isOnSameRack(targets[0],targets[1]));
  assertTrue(isOnSameRack(targets[1],targets[2]));
  assertFalse(isOnSameNodeGroup(targets[1],targets[2]));
  // 4 replicas: still no two targets in one node group.
  targets=chooseTarget(4);
  assertEquals(4,targets.length);
  assertEquals(storages[0],targets[0]);
  assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
  assertFalse(isOnSameRack(targets[0],targets[2]));
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  // Restore the heartbeat (xceiver count back to 0) for later tests.
  updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
APIUtilityVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test for an OS dependent absolute paths.
 * @throws IOException
 */
@Test public void testAbsolutePathAsURI() throws IOException {
  // Dropped the dead `u = null` initialization; the variable is assigned
  // immediately. Also added the missing space before the interpolated path
  // in the failure messages.
  URI u=Util.stringAsURI(ABSOLUTE_PATH_WINDOWS);
  assertNotNull("Uri should not be null for Windows path " + ABSOLUTE_PATH_WINDOWS,u);
  assertEquals(URI_FILE_SCHEMA,u.getScheme());
  u=Util.stringAsURI(ABSOLUTE_PATH_UNIX);
  assertNotNull("Uri should not be null for Unix path " + ABSOLUTE_PATH_UNIX,u);
  assertEquals(URI_FILE_SCHEMA,u.getScheme());
}
APIUtilityVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test for a URI
 * @throws IOException
 */
@Test public void testURI() throws IOException {
  // A well-formed Unix file URI must round-trip with scheme and path intact.
  LOG.info("Testing correct Unix URI: " + URI_UNIX);
  URI uri=Util.stringAsURI(URI_UNIX);
  LOG.info("Uri: " + uri);
  assertNotNull("Uri should not be null at this point",uri);
  assertEquals(URI_FILE_SCHEMA,uri.getScheme());
  assertEquals(URI_PATH_UNIX,uri.getPath());
  // Same check for a Windows-style URI; percent-encoded spaces in the
  // expected constant are decoded before comparing paths.
  LOG.info("Testing correct windows URI: " + URI_WINDOWS);
  uri=Util.stringAsURI(URI_WINDOWS);
  LOG.info("Uri: " + uri);
  assertNotNull("Uri should not be null at this point",uri);
  assertEquals(URI_FILE_SCHEMA,uri.getScheme());
  assertEquals(URI_PATH_WINDOWS.replace("%20"," "),uri.getPath());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test write a file, verifies and closes it. Then a couple of random blocks
 * is removed and BlockReport is forced; the FSNamesystem is pushed to
 * recalculate required DN's activities such as replications and so on.
 * The number of missing and under-replicated blocks should be the same in
 * case of a single-DN cluster.
 * @throws IOException in case of errors
 */
@Test(timeout=300000) public void blockReport_02() throws IOException {
  final String METHOD_NAME=GenericTestUtils.getMethodName();
  LOG.info("Running test " + METHOD_NAME);
  Path filePath=new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong());
  File dataDir=new File(cluster.getDataDirectory());
  assertTrue(dataDir.isDirectory());
  // Restored generics: the raw List/ArrayList declarations do not
  // type-check against the for-each loops below.
  List<ExtendedBlock> blocks2Remove=new ArrayList<ExtendedBlock>();
  List<Integer> removedIndex=new ArrayList<Integer>();
  List<LocatedBlock> lBlocks=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_START,FILE_SIZE).getLocatedBlocks();
  // Pick two distinct random block indexes to delete.
  while (removedIndex.size() != 2) {
    int newRemoveIndex=rand.nextInt(lBlocks.size());
    if (!removedIndex.contains(newRemoveIndex)) removedIndex.add(newRemoveIndex);
  }
  for ( Integer aRemovedIndex : removedIndex) {
    blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Number of blocks allocated " + lBlocks.size());
  }
  final DataNode dn0=cluster.getDataNodes().get(DN_N0);
  // Physically delete the chosen block files from the DN's data directory,
  // un-finalizing each block first so the dataset forgets it.
  for ( ExtendedBlock b : blocks2Remove) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing the block " + b.getBlockName());
    }
    for ( File f : findAllFiles(dataDir,new MyFileFilter(b.getBlockName(),true))) {
      DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b);
      if (!f.delete()) {
        LOG.warn("Couldn't delete " + b.getBlockName());
      }
      else {
        LOG.debug("Deleted file " + f.toString());
      }
    }
  }
  // Give the DN's directory rescan time to notice the deletions.
  waitTil(DN_RESCAN_EXTRA_WAIT);
  String poolId=cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR=dn0.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports=getBlockReports(dn0,poolId,false,false);
  sendBlockReports(dnR,poolId,reports);
  // Force the NN to recompute replication work, then check the counters:
  // every removed block must show up as both missing and under-replicated.
  BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem().getBlockManager());
  printStats();
  assertEquals("Wrong number of MissingBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getMissingBlocksCount());
  assertEquals("Wrong number of UnderReplicatedBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getUnderReplicatedBlocks());
}
InternalCallVerifierBooleanVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Test that individual volume failures do not cause DNs to fail, that
 * all volumes failed on a single datanode do cause it to fail, and
 * that the capacities and liveliness is adjusted correctly in the NN.
 */
@Test public void testSuccessiveVolumeFailures() throws Exception {
  // Volume failure is simulated via chmod, which does not work on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  // Bring the cluster to three DNs and let heartbeats settle.
  cluster.startDataNodes(conf,2,true,null,null);
  cluster.waitActive();
  Thread.sleep(WAIT_FOR_HEARTBEATS);
  final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
  long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
  // Each DN n owns volumes data(2n+1) and data(2n+2) under dataDir.
  File dn1Vol1=new File(dataDir,"data" + (2 * 0 + 1));
  File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
  File dn3Vol1=new File(dataDir,"data" + (2 * 2 + 1));
  File dn3Vol2=new File(dataDir,"data" + (2 * 2 + 2));
  // Fail one volume each on DN1 and DN2; both DNs must survive.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,false));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
  Path file1=new Path("/test1");
  DFSTestUtil.createFile(fs,file1,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,file1,(short)3);
  // Restored generics: the raw ArrayList declarations do not type-check
  // against the element accessors used below.
  ArrayList<DataNode> dns=cluster.getDataNodes();
  assertTrue("DN1 should be up",dns.get(0).isDatanodeUp());
  assertTrue("DN2 should be up",dns.get(1).isDatanodeUp());
  assertTrue("DN3 should be up",dns.get(2).isDatanodeUp());
  assertCounter("VolumeFailures",1L,getMetrics(dns.get(0).getMetrics().name()));
  assertCounter("VolumeFailures",1L,getMetrics(dns.get(1).getMetrics().name()));
  assertCounter("VolumeFailures",0L,getMetrics(dns.get(2).getMetrics().name()));
  assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
  // NN should see 3 live DNs with 2 failed volumes and reduced capacity.
  DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
  // Now fail one volume on DN3 as well; it must also stay up.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,false));
  Path file2=new Path("/test2");
  DFSTestUtil.createFile(fs,file2,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,file2,(short)3);
  assertTrue("DN3 should still be up",dns.get(2).isDatanodeUp());
  assertCounter("VolumeFailures",1L,getMetrics(dns.get(2).getMetrics().name()));
  ArrayList<DatanodeDescriptor> live=new ArrayList<DatanodeDescriptor>();
  ArrayList<DatanodeDescriptor> dead=new ArrayList<DatanodeDescriptor>();
  dm.fetchDatanodes(live,dead,false);
  // NOTE(review): the first fetch's result is discarded before fetching
  // again — looks redundant; confirm against git history before removing.
  live.clear();
  dead.clear();
  dm.fetchDatanodes(live,dead,false);
  assertEquals("DN3 should have 1 failed volume",1,live.get(2).getVolumeFailures());
  dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
  DFSTestUtil.waitForDatanodeStatus(dm,3,0,3,origCapacity - (3 * dnCapacity),WAIT_FOR_HEARTBEATS);
  // Fail DN3's remaining volume: with all volumes gone the DN must die.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,false));
  Path file3=new Path("/test3");
  DFSTestUtil.createFile(fs,file3,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,file3,(short)2);
  DFSTestUtil.waitForDatanodeDeath(dns.get(2));
  assertCounter("VolumeFailures",2L,getMetrics(dns.get(2).getMetrics().name()));
  DFSTestUtil.waitForDatanodeStatus(dm,2,1,2,origCapacity - (4 * dnCapacity),WAIT_FOR_HEARTBEATS);
  // Restore all volumes and restart: the cluster should fully recover.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,true));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,true));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,true));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,true));
  cluster.restartDataNodes();
  cluster.waitActive();
  Path file4=new Path("/test4");
  DFSTestUtil.createFile(fs,file4,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,file4,(short)3);
  DFSTestUtil.waitForDatanodeStatus(dm,3,0,0,origCapacity,WAIT_FOR_HEARTBEATS);
}
BooleanVerifierAssumptionSetterHybridVerifier
/**
 * Test that the NN re-learns of volume failures after restart.
 */
@Test public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
  // chmod-based volume failure does not work on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  // Run with three DNs in total.
  cluster.startDataNodes(conf,2,true,null,null);
  cluster.waitActive();
  final DatanodeManager dnManager=cluster.getNamesystem().getBlockManager().getDatanodeManager();
  long capacityBefore=DFSTestUtil.getLiveDatanodeCapacity(dnManager);
  long singleDnCapacity=DFSTestUtil.getDatanodeCapacity(dnManager,0);
  // First volume of DN1 ("data1") and of DN2 ("data3").
  File dn1Volume1=new File(dataDir,"data1");
  File dn2Volume1=new File(dataDir,"data3");
  // Knock out one volume on each of the first two DNs.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Volume1,false));
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Volume1,false));
  Path testFile=new Path("/test1");
  DFSTestUtil.createFile(fs,testFile,1024,(short)2,1L);
  DFSTestUtil.waitReplication(fs,testFile,(short)2);
  // NN sees 3 live DNs, 2 failed volumes, and reduced capacity.
  DFSTestUtil.waitForDatanodeStatus(dnManager,3,0,2,capacityBefore - (1 * singleDnCapacity),WAIT_FOR_HEARTBEATS);
  // After an NN restart the same failure stats must be re-learned.
  cluster.restartNameNode(0);
  cluster.waitActive();
  DFSTestUtil.waitForDatanodeStatus(dnManager,3,0,2,capacityBefore - (1 * singleDnCapacity),WAIT_FOR_HEARTBEATS);
}
InternalCallVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Test that a volume that is considered failed on startup is seen as
 * a failed volume by the NN.
 */
@Test public void testFailedVolumeOnStartupIsCounted() throws Exception {
  // chmod-based directory sabotage does not work on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
  long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
  File dir=new File(cluster.getInstanceStorageDir(0,0),"current");
  try {
    // Make the storage dir unusable, then restart the DN against it.
    prepareDirToFail(dir);
    restartDatanodes(1,false);
    // assertTrue reads better than assertEquals(true, ...) and gives a
    // clearer failure message.
    assertTrue("The DN's BP service should still be alive",cluster.getDataNodes().get(0).isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
    // One live DN, one failed volume, half the original capacity.
    DFSTestUtil.waitForDatanodeStatus(dm,1,0,1,origCapacity / 2,WAIT_FOR_HEARTBEATS);
  }
  finally {
    // Restore permissions so later tests can reuse the directory.
    FileUtil.chmod(dir.toString(),"755");
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierAssumptionSetterHybridVerifier
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN tolerates a failed-to-use scenario during
 * its start-up.
 */
@Test public void testValidVolumesAtStartup() throws Exception {
  // chmod-based directory sabotage does not work on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  cluster.shutdownDataNodes();
  // Tolerate exactly one failed volume.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,1);
  File tld=new File(MiniDFSCluster.getBaseDirectory(),"badData");
  File dataDir1=new File(tld,"data1");
  File dataDir1Actual=new File(dataDir1,"1");
  dataDir1Actual.mkdirs();
  // Second volume's parent is made inaccessible so it fails at startup.
  File dataDir2=new File(tld,"data2");
  prepareDirToFail(dataDir2);
  File dataDir2Actual=new File(dataDir2,"2");
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
  try {
    // Moved inside the try: if startup throws, the finally block still
    // restores dataDir2's permissions (the original skipped cleanup here).
    cluster.startDataNodes(conf,1,false,null,null);
    cluster.waitActive();
    assertTrue("The DN should have started up fine.",cluster.isDataNodeUp());
    DataNode dn=cluster.getDataNodes().get(0);
    String si=DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
    assertTrue("The DN should have started with this directory",si.contains(dataDir1Actual.getPath()));
    assertFalse("The DN shouldn't have a bad directory.",si.contains(dataDir2Actual.getPath()));
  }
  finally {
    cluster.shutdownDataNodes();
    FileUtil.chmod(dataDir2.toString(),"755");
  }
}
BooleanVerifierAssumptionSetterHybridVerifier
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN shuts itself down when the number of failures
 * experienced drops below the tolerated amount.
 */
@Test public void testConfigureMinValidVolumes() throws Exception {
  // chmod-based volume failure does not work on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  // Zero tolerated failures: a single bad volume should kill the DN.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,0);
  cluster.startDataNodes(conf,2,true,null,null);
  cluster.waitActive();
  final DatanodeManager dnManager=cluster.getNamesystem().getBlockManager().getDatanodeManager();
  long capacityBefore=DFSTestUtil.getLiveDatanodeCapacity(dnManager);
  long perDnCapacity=DFSTestUtil.getDatanodeCapacity(dnManager,0);
  // First volume of the second DN ("data3").
  File dn2Volume1=new File(dataDir,"data3");
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Volume1,false));
  // Writing triggers the failure; DN2 goes down, leaving 2 live / 1 dead.
  Path firstFile=new Path("/test1");
  DFSTestUtil.createFile(fs,firstFile,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,firstFile,(short)2);
  DFSTestUtil.waitForDatanodeStatus(dnManager,2,1,0,capacityBefore - (1 * perDnCapacity),WAIT_FOR_HEARTBEATS);
  // Restore the volume; further writes should succeed on the live DNs.
  assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Volume1,true));
  Path secondFile=new Path("/test2");
  DFSTestUtil.createFile(fs,secondFile,1024,(short)3,1L);
  DFSTestUtil.waitReplication(fs,secondFile,(short)2);
}
TestInitializerBooleanVerifierHybridVerifier
@Before public void setUp() throws IOException {
  // Start every test from a clean, freshly created directory.
  FileUtil.fullyDelete(TEST_DIR);
  assertTrue("Failed to make test dir.",TEST_DIR.mkdirs());
  // Fresh DataStorage and namespace descriptor per test.
  storage=new DataStorage();
  nsInfo=new NamespaceInfo(0,CLUSTER_ID,DEFAULT_BPID,CTIME,BUILD_VERSION,SOFTWARE_VERSION);
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * recoverTransitionRead must fail when every configured StorageLocation is
 * NON_EXISTENT, and must leave no storage directory registered.
 */
@Test public void testRecoverTransitionReadFailure() throws IOException {
  final int numLocations=3;
  // Restored generics: the original declared a raw List.
  List<StorageLocation> locations=createStorageLocations(numLocations,true);
  try {
    storage.recoverTransitionRead(mockDN,nsInfo,locations,START_OPT);
    fail("An IOException should throw: all StorageLocations are NON_EXISTENT");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",e);
  }
  // Nothing may be registered after the failed recovery.
  assertEquals(0,storage.getNumStorageDirs());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * This test enforces the behavior that if there is an exception from
 * doTransition() during DN starts up, the storage directories that have
 * already been processed are still visible, i.e., in
 * DataStorage.storageDirs().
 */
@Test public void testRecoverTransitionReadDoTransitionFailure() throws IOException {
  final int numLocations=3;
  // Restored generics: the original declared a raw List.
  List<StorageLocation> locations=createStorageLocations(numLocations);
  String bpid=nsInfo.getBlockPoolID();
  // First recovery succeeds; release the locks so a second attempt can run.
  storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT);
  storage.unlockAll();
  storage=new DataStorage();
  // Changing the cluster ID makes doTransition() reject the directories.
  nsInfo.clusterID="cluster1";
  try {
    storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT);
    fail("Expect to throw an exception from doTransition()");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("Incompatible clusterIDs",e);
  }
  // Already-processed directories remain visible despite the failure.
  assertEquals(numLocations,storage.getNumStorageDirs());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * addStorageLocations: the same locations added for several namespaces must
 * not duplicate storage dirs; re-adding active (locked) dirs must fail; and
 * genuinely new locations must be added on top of the existing ones.
 */
@Test public void testAddStorageDirectories() throws IOException, URISyntaxException {
  final int numLocations=3;
  final int numNamespace=3;
  // Restored generics: the raw List declarations do not type-check against
  // the for-each loops below.
  List<StorageLocation> locations=createStorageLocations(numLocations);
  List<NamespaceInfo> namespaceInfos=createNamespaceInfos(numNamespace);
  for ( NamespaceInfo ni : namespaceInfos) {
    storage.addStorageLocations(mockDN,ni,locations,START_OPT);
    for ( StorageLocation sl : locations) {
      // Each location gets a top-level dir plus a per-block-pool subdir.
      checkDir(sl.getFile());
      checkDir(sl.getFile(),ni.getBlockPoolID());
    }
  }
  assertEquals(numLocations,storage.getNumStorageDirs());
  // Re-adding the same (now locked) directories must be rejected.
  locations=createStorageLocations(numLocations);
  try {
    storage.addStorageLocations(mockDN,namespaceInfos.get(0),locations,START_OPT);
    fail("Expected to throw IOException: adding active directories.");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",e);
  }
  // The failed add must not have changed the registered count.
  assertEquals(numLocations,storage.getNumStorageDirs());
  // Adding a fresh, larger set of locations succeeds.
  locations=createStorageLocations(6);
  storage.addStorageLocations(mockDN,nsInfo,locations,START_OPT);
  assertEquals(6,storage.getNumStorageDirs());
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Retrieving namespace info must tolerate a NN reporting a different layout
 * version: first verify the unstubbed baseline, then stub a wildly different
 * version and check that retrieveNamespaceInfo() does not throw.
 */
@Test public void testDifferentLayoutVersions() throws Exception {
// Baseline: the actor reports the current NAMENODE_LAYOUT_VERSION.
assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION,actor.retrieveNamespaceInfo().getLayoutVersion());
// Stub an (arbitrarily scaled) mismatching layout version on the mock.
doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo).getLayoutVersion();
try {
actor.retrieveNamespaceInfo();
}
 catch ( IOException e) {
// A layout-version mismatch alone must not make retrieval fail.
fail("Should not fail to retrieve NS info from DN with different layout version");
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Software-version handshake checks: a NN version above the DN's minimum is
 * accepted; a NN version below the configured minimum must raise
 * IncorrectVersionException.
 */
@Test public void testSoftwareVersionDifferences() throws Exception {
// Baseline: unstubbed actor reports the build's own version.
assertEquals(VersionInfo.getVersion(),actor.retrieveNamespaceInfo().getSoftwareVersion());
// NN at 4.0.0 with a 3.0.0 minimum: accepted, version passed through.
doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
assertEquals("4.0.0",actor.retrieveNamespaceInfo().getSoftwareVersion());
// NN at 3.0.0 but minimum raised to 4.0.0: must be rejected.
doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
try {
actor.retrieveNamespaceInfo();
fail("Should have thrown an exception for NN with too-low version");
}
 catch ( IncorrectVersionException ive) {
GenericTestUtils.assertExceptionContains("The reported NameNode version is too low",ive);
LOG.info("Got expected exception",ive);
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test for{@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock,long,long)}
*/
@Test public void testUpdateReplicaUnderRecovery() throws IOException {
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
String bpid=cluster.getNamesystem().getBlockPoolId();
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,1024L,(short)3,0L);
final LocatedBlock locatedblock=getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(),filestr);
final DatanodeInfo[] datanodeinfo=locatedblock.getLocations();
Assert.assertTrue(datanodeinfo.length > 0);
final DataNode datanode=cluster.getDataNode(datanodeinfo[0].getIpcPort());
Assert.assertTrue(datanode != null);
final ExtendedBlock b=locatedblock.getBlock();
final long recoveryid=b.getGenerationStamp() + 1;
final long newlength=b.getNumBytes() - 1;
final FsDatasetSpi> fsdataset=DataNodeTestUtils.getFSDataset(datanode);
final ReplicaRecoveryInfo rri=fsdataset.initReplicaRecovery(new RecoveringBlock(b,null,recoveryid));
final ReplicaInfo replica=FsDatasetTestUtil.fetchReplicaInfo(fsdataset,bpid,b.getBlockId());
Assert.assertEquals(ReplicaState.RUR,replica.getState());
FsDatasetImpl.checkReplicaFiles(replica);
{
final ExtendedBlock tmp=new ExtendedBlock(b.getBlockPoolId(),rri.getBlockId(),rri.getNumBytes() - 1,rri.getGenerationStamp());
try {
fsdataset.updateReplicaUnderRecovery(tmp,recoveryid,newlength);
Assert.fail();
}
catch ( IOException ioe) {
System.out.println("GOOD: getting " + ioe);
}
}
final String storageID=fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(),rri),recoveryid,newlength);
assertTrue(storageID != null);
}
finally {
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test {@link FsDatasetImpl#initReplicaRecovery(String,ReplicaMap,Block,long,long)}
 */
@Test public void testInitReplicaRecovery() throws IOException {
  final long firstblockid=10000L;
  final long gs=7777L;
  final long length=22L;
  final ReplicaMap map=new ReplicaMap(this);
  String bpid="BP-TEST";
  final Block[] blocks=new Block[5];
  for (int i=0; i < blocks.length; i++) {
    blocks[i]=new Block(firstblockid + i,length,gs);
    map.add(bpid,createReplicaInfo(blocks[i]));
  }
  {
    // Normal case: recovery can be initiated, then re-initiated with a newer
    // recovery id; an older id while recovery is in progress must fail.
    final Block b=blocks[0];
    final ReplicaInfo originalInfo=map.get(bpid,b);
    final long recoveryid=gs + 1;
    final ReplicaRecoveryInfo recoveryInfo=FsDatasetImpl.initReplicaRecovery(bpid,map,blocks[0],recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo,recoveryInfo);
    final ReplicaUnderRecovery updatedInfo=(ReplicaUnderRecovery)map.get(bpid,b);
    Assert.assertEquals(originalInfo.getBlockId(),updatedInfo.getBlockId());
    Assert.assertEquals(recoveryid,updatedInfo.getRecoveryID());
    final long recoveryid2=gs + 2;
    final ReplicaRecoveryInfo recoveryInfo2=FsDatasetImpl.initReplicaRecovery(bpid,map,blocks[0],recoveryid2,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo,recoveryInfo2);
    final ReplicaUnderRecovery updatedInfo2=(ReplicaUnderRecovery)map.get(bpid,b);
    Assert.assertEquals(originalInfo.getBlockId(),updatedInfo2.getBlockId());
    Assert.assertEquals(recoveryid2,updatedInfo2.getRecoveryID());
    try {
      FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    }
    catch ( RecoveryInProgressException ripe) {
      System.out.println("GOOD: getting " + ripe);
    }
  }
  {
    // Unknown block: recovery of a replica the DN does not have returns null.
    final long recoveryid=gs + 1;
    final Block b=new Block(firstblockid - 1,length,gs);
    ReplicaRecoveryInfo r=FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    Assert.assertNull("Data-node should not have this replica.",r);
  }
  {
    // Recovery id older than the replica's generation stamp must fail.
    final long recoveryid=gs - 1;
    final Block b=new Block(firstblockid + 1,length,gs);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    }
    catch ( IOException ioe) {
      System.out.println("GOOD: getting " + ioe);
    }
  }
  {
    // Block gs newer than the stored replica's gs must also fail.
    final long recoveryid=gs + 1;
    final Block b=new Block(firstblockid,length,gs + 1);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail("InitReplicaRecovery should fail because replica's " + "gs is less than the block's gs");
    }
    catch ( IOException e) {
      // BUG FIX: the original evaluated startsWith() and discarded the
      // result, so this catch verified nothing. Assert on it.
      Assert.assertTrue(e.getMessage().startsWith("replica.getGenerationStamp() < block.getGenerationStamp(), block="));
    }
  }
}
UtilityVerifierExceptionVerifierHybridVerifier
/**
 * Test to verify that InterDatanode RPC timesout as expected when
 * the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class) public void testInterDNProtocolTimeout() throws Throwable {
  // A server that accepts connections but never answers requests.
  final Server rpcServer=new TestServer(1,true);
  rpcServer.start();
  final InetSocketAddress serverAddr=NetUtils.getConnectAddress(rpcServer);
  DatanodeID dnId=DFSTestUtil.getLocalDatanodeID(serverAddr.getPort());
  DatanodeInfo dnInfo=new DatanodeInfo(dnId);
  InterDatanodeProtocol proxy=null;
  try {
    // 500ms socket timeout: the unresponsive server must trip it, and the
    // resulting SocketTimeoutException satisfies the @Test(expected=...).
    proxy=DataNode.createInterDataNodeProtocolProxy(dnInfo,conf,500,false);
    proxy.initReplicaRecovery(new RecoveringBlock(new ExtendedBlock("bpid",1),null,100));
    fail("Expected SocketTimeoutException exception, but did not get.");
  }
  finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    rpcServer.stop();
  }
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Tests for setting xattr
 * 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag.
 * 2. Set xattr with illegal name.
 * 3. Set xattr without XAttrSetFlag.
 * 4. Set xattr and total number exceeds max limit.
 * 5. Set xattr and name is too long.
 * 6. Set xattr and value is too long.
 */
@Test(timeout=120000) public void testSetXAttr() throws Exception {
  FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750));
  // CREATE|REPLACE together behaves like an unconditional set.
  fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
  // Restored generics: the raw Map made assertArrayEquals(byte[], ...)
  // un-compilable. Also fixed assertEquals to (expected, actual) order.
  Map<String,byte[]> xattrs=fs.getXAttrs(path);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  fs.removeXAttr(path,name1);
  // A null name must be rejected (NPE locally, RemoteException over RPC).
  try {
    fs.setXAttr(path,null,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with null name should fail.");
  }
  catch ( NullPointerException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be null",e);
  }
  catch ( RemoteException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be null",e);
  }
  // A bare namespace prefix ("user.") is an empty name.
  try {
    fs.setXAttr(path,"user.",value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with empty name should fail.");
  }
  catch ( RemoteException e) {
    assertEquals("Unexpected RemoteException: " + e,HadoopIllegalArgumentException.class.getCanonicalName(),e.getClassName());
    GenericTestUtils.assertExceptionContains("XAttr name cannot be empty",e);
  }
  catch ( HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be empty",e);
  }
  // A name without a recognized namespace prefix must be rejected.
  try {
    fs.setXAttr(path,"a1",value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with invalid name prefix or without " + "name prefix should fail.");
  }
  catch ( RemoteException e) {
    assertEquals("Unexpected RemoteException: " + e,HadoopIllegalArgumentException.class.getCanonicalName(),e.getClassName());
    GenericTestUtils.assertExceptionContains("XAttr name must be prefixed",e);
  }
  catch ( HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("XAttr name must be prefixed",e);
  }
  // Setting without flags defaults to CREATE|REPLACE.
  fs.setXAttr(path,name1,value1);
  xattrs=fs.getXAttrs(path);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  fs.removeXAttr(path,name1);
  // CREATE then CREATE|REPLACE overwrites the value.
  fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path,name1,newValue1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
  xattrs=fs.getXAttrs(path);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(newValue1,xattrs.get(name1));
  fs.removeXAttr(path,name1);
  // Exceeding the per-inode xattr count limit must fail on the 4th attr.
  fs.setXAttr(path,name1,value1);
  fs.setXAttr(path,name2,value2);
  fs.setXAttr(path,name3,null);
  try {
    fs.setXAttr(path,name4,null);
    Assert.fail("Setting xattr should fail if total number of xattrs " + "for inode exceeds max limit.");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot add additional XAttr",e);
  }
  fs.removeXAttr(path,name1);
  fs.removeXAttr(path,name2);
  fs.removeXAttr(path,name3);
  // Name alone exceeding the size limit must fail.
  String longName="user.0123456789abcdefX";
  try {
    fs.setXAttr(path,longName,null);
    Assert.fail("Setting xattr should fail if name is too long.");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("XAttr is too big",e);
    GenericTestUtils.assertExceptionContains("total size is 17",e);
  }
  // Value pushing name+value past the limit must fail.
  byte[] longValue=new byte[MAX_SIZE];
  try {
    fs.setXAttr(path,"user.a",longValue);
    Assert.fail("Setting xattr should fail if value is too long.");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("XAttr is too big",e);
    GenericTestUtils.assertExceptionContains("total size is 17",e);
  }
  // name + value exactly at the limit is accepted.
  String name="user.111";
  byte[] value=new byte[MAX_SIZE - 3];
  fs.setXAttr(path,name,value);
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Tests for replacing xattr
 * 1. Replace an xattr using XAttrSetFlag.REPLACE.
 * 2. Replace an xattr which doesn't exist and expect an exception.
 * 3. Create multiple xattrs and replace some.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout=120000) public void testReplaceXAttr() throws Exception {
  FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750));
  fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE));
  // REPLACE on an existing xattr overwrites the value.
  fs.setXAttr(path,name1,newValue1,EnumSet.of(XAttrSetFlag.REPLACE));
  // Restored generics: the raw Map made assertArrayEquals(byte[], ...)
  // un-compilable. Also fixed assertEquals to (expected, actual) order.
  Map<String,byte[]> xattrs=fs.getXAttrs(path);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(newValue1,xattrs.get(name1));
  fs.removeXAttr(path,name1);
  // REPLACE on a missing xattr must fail.
  try {
    fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.REPLACE));
    Assert.fail("Replacing xattr which does not exist should fail.");
  }
  catch ( IOException e) {
  }
  // Create two xattrs, then replace one with a null (empty) value.
  fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path,name2,value2,EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path,name2,null,EnumSet.of(XAttrSetFlag.REPLACE));
  xattrs=fs.getXAttrs(path);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0],xattrs.get(name2));
  // Values must survive an NN restart (edit-log replay)...
  restart(false);
  initFileSystem();
  xattrs=fs.getXAttrs(path);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0],xattrs.get(name2));
  // ...and a restart after saving a checkpoint (fsimage load).
  restart(true);
  initFileSystem();
  xattrs=fs.getXAttrs(path);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0],xattrs.get(name2));
  fs.removeXAttr(path,name1);
  fs.removeXAttr(path,name2);
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * removexattr tests. Test that removexattr throws an exception if any of
 * the following are true:
 * an xattr that was requested doesn't exist
 * the caller specifies an unknown namespace
 * the caller doesn't have access to the namespace
 * the caller doesn't have permission to get the value of the xattr
 * the caller does not have "execute" (scan) access to the parent directory
 * the caller has only read access to the owning directory
 * the caller has only execute access to the owning directory and execute
 * access to the actual entity
 * the caller does not have execute access to the owning directory and write
 * access to the actual entity
 */
@Test(timeout=120000) public void testRemoveXAttrPermissions() throws Exception {
// Superuser-owned directory (0750) preloaded with three xattrs.
FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750));
fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path,name2,value2,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path,name3,null,EnumSet.of(XAttrSetFlag.CREATE));
// Removing the same xattr twice: the second remove targets an attribute
// that no longer exists and must fail.
try {
fs.removeXAttr(path,name2);
fs.removeXAttr(path,name2);
Assert.fail("expected IOException");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("No matching attributes found",e);
}
// An unknown namespace must be rejected. The error may surface either as
// a RemoteException wrapping HadoopIllegalArgumentException (over RPC)
// or as the bare HadoopIllegalArgumentException.
final String expectedExceptionString="An XAttr name must be prefixed " + "with user/trusted/security/system/raw, followed by a '.'";
try {
fs.removeXAttr(path,"wackynamespace.foo");
Assert.fail("expected IOException");
}
catch ( RemoteException e) {
assertEquals("Unexpected RemoteException: " + e,e.getClassName(),HadoopIllegalArgumentException.class.getCanonicalName());
GenericTestUtils.assertExceptionContains(expectedExceptionString,e);
}
catch ( HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(expectedExceptionString,e);
}
// The "trusted" namespace is reserved: an unprivileged user must not be
// able to remove a trusted.* xattr even if the mode bits would allow it.
final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
fs.setXAttr(path,"trusted.foo","1234".getBytes());
try {
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
userFs.removeXAttr(path,"trusted.foo");
return null;
}
}
);
Assert.fail("expected IOException");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("User doesn't have permission",e);
}
finally {
fs.removeXAttr(path,"trusted.foo");
}
// With mode 0700 the unprivileged user has no access to the path at all,
// so removing a user-namespace xattr is denied.
fs.setPermission(path,new FsPermission((short)0700));
try {
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
userFs.removeXAttr(path,name1);
return null;
}
}
);
Assert.fail("expected IOException");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied",e);
}
// Child dir owned 0700 under the (still 0700) parent: the user lacks
// execute/scan access to the parent, so the child's xattr is unreachable.
final Path childDir=new Path(path,"child" + pathCount);
FileSystem.mkdirs(fs,childDir,FsPermission.createImmutable((short)0700));
fs.setXAttr(childDir,name1,"1234".getBytes());
try {
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
userFs.removeXAttr(childDir,name1);
return null;
}
}
);
Assert.fail("expected IOException");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied",e);
}
// Read-only (0704) access to the parent is still not enough.
fs.setPermission(path,new FsPermission((short)0704));
try {
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
userFs.removeXAttr(childDir,name1);
return null;
}
}
);
Assert.fail("expected IOException");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied",e);
}
// Execute-only (0701) on both the parent and the child entity: scan
// access without write access on the entity is still denied.
fs.setPermission(path,new FsPermission((short)0701));
fs.setPermission(childDir,new FsPermission((short)0701));
try {
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
userFs.removeXAttr(childDir,name1);
return null;
}
}
);
Assert.fail("expected IOException");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied",e);
}
// Execute on the parent (0701) plus write access on the entity (0706):
// this combination is sufficient, so the remove succeeds.
fs.setPermission(path,new FsPermission((short)0701));
fs.setPermission(childDir,new FsPermission((short)0706));
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
userFs.removeXAttr(childDir,name1);
return null;
}
}
);
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Tests for creating xattr
 * 1. Create an xattr using XAttrSetFlag.CREATE.
 * 2. Create an xattr which already exists and expect an exception.
 * 3. Create multiple xattrs.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout=120000) public void testCreateXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));

  // Create a single xattr and verify it is the only one present.
  // JUnit convention: expected value comes first in assertEquals.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));

  fs.removeXAttr(path, name1);
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(0, xattrs.size());

  // Re-creating an xattr that already exists with CREATE must fail.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  try {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    Assert.fail("Creating xattr which already exists should fail.");
  } catch (IOException e) {
    // expected: xattr already exists
  }
  fs.removeXAttr(path, name1);

  // Create two xattrs; a null value is stored as an empty byte array.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

  // Restart without a checkpoint: xattrs must survive edit-log replay.
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

  // Restart with a checkpoint: xattrs must survive the fsimage round trip.
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=120000) public void testRawXAttrs() throws Exception {
final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750));
// Set a raw.* xattr through the /.reserved/raw view of the same path.
fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
{
// getXAttr on the raw path returns the value that was set.
final byte[] value=fs.getXAttr(rawPath,raw1);
Assert.assertArrayEquals(value,value1);
}
{
// getXAttrs on the raw path sees exactly the one raw attribute.
final Map xattrs=fs.getXAttrs(rawPath);
Assert.assertEquals(xattrs.size(),1);
Assert.assertArrayEquals(value1,xattrs.get(raw1));
fs.removeXAttr(rawPath,raw1);
}
{
// CREATE followed by CREATE|REPLACE overwrites the existing value.
fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath,raw1,newValue1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
final Map xattrs=fs.getXAttrs(rawPath);
Assert.assertEquals(xattrs.size(),1);
Assert.assertArrayEquals(newValue1,xattrs.get(raw1));
fs.removeXAttr(rawPath,raw1);
}
{
// listXAttrs on the raw path reports both raw attribute names.
fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath,raw2,value2,EnumSet.of(XAttrSetFlag.CREATE));
final List xattrNames=fs.listXAttrs(rawPath);
assertTrue(xattrNames.contains(raw1));
assertTrue(xattrNames.contains(raw2));
assertTrue(xattrNames.size() == 2);
fs.removeXAttr(rawPath,raw1);
fs.removeXAttr(rawPath,raw2);
}
{
// Raw xattrs are invisible when listing through the normal (non-raw)
// path, even for the superuser.
fs.setXAttr(rawPath,raw1,value1,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath,raw2,value2,EnumSet.of(XAttrSetFlag.CREATE));
final List xattrNames=fs.listXAttrs(path);
assertTrue(xattrNames.size() == 0);
fs.removeXAttr(rawPath,raw1);
fs.removeXAttr(rawPath,raw2);
}
{
// An unprivileged user can neither set nor get raw.* xattrs, whether
// addressed through the normal path or the /.reserved/raw path.
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
try {
userFs.setXAttr(path,raw1,value1);
fail("setXAttr should have thrown");
}
catch ( AccessControlException e) {
// expected: raw namespace is superuser-only
}
try {
userFs.setXAttr(rawPath,raw1,value1);
fail("setXAttr should have thrown");
}
catch ( AccessControlException e) {
// expected
}
try {
userFs.getXAttrs(rawPath);
fail("getXAttrs should have thrown");
}
catch ( AccessControlException e) {
// expected
}
try {
userFs.getXAttrs(path);
fail("getXAttrs should have thrown");
}
catch ( AccessControlException e) {
// expected
}
try {
userFs.getXAttr(rawPath,raw1);
fail("getXAttr should have thrown");
}
catch ( AccessControlException e) {
// expected
}
try {
userFs.getXAttr(path,raw1);
fail("getXAttr should have thrown");
}
catch ( AccessControlException e) {
// expected
}
return null;
}
}
);
}
{
// Even when a raw xattr exists, the unprivileged user cannot read it or
// list the raw path; listing the normal path simply shows nothing.
fs.setXAttr(rawPath,raw1,value1);
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final FileSystem userFs=dfsCluster.getFileSystem();
try {
userFs.getXAttr(rawPath,raw1);
fail("getXAttr should have thrown");
}
catch ( AccessControlException e) {
// expected
}
try {
userFs.getXAttr(path,raw1);
fail("getXAttr should have thrown");
}
catch ( AccessControlException e) {
// expected
}
final List xattrNames=userFs.listXAttrs(path);
assertTrue(xattrNames.size() == 0);
try {
userFs.listXAttrs(rawPath);
fail("listXAttrs on raw path should have thrown");
}
catch ( AccessControlException e) {
// expected
}
return null;
}
}
);
fs.removeXAttr(rawPath,raw1);
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Test the listXAttrs api.
 * listXAttrs on a path that doesn't exist.
 * listXAttrs on a path with no XAttrs
 * Check basic functionality.
 * Check that read access to parent dir is not enough to get xattr names
 * Check that write access to the parent dir is not enough to get names
 * Check that execute/scan access to the parent dir is sufficient to get
 * xattr names.
 */
@Test(timeout=120000) public void testListXAttrs() throws Exception {
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[]{"mygroup"});

  // listXAttrs on a nonexistent path must throw FileNotFoundException.
  try {
    fs.listXAttrs(path);
    fail("expected FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("cannot find", e);
  }

  // A fresh directory has no xattrs.
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  final List<String> noXAttrs = fs.listXAttrs(path);
  assertEquals("XAttrs were found?", 0, noXAttrs.size());

  // Basic functionality: both names are listed, and nothing else.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  final List<String> xattrNames = fs.listXAttrs(path);
  assertTrue(xattrNames.contains(name1));
  assertTrue(xattrNames.contains(name2));
  assertEquals(2, xattrNames.size());

  // Read access (0704) on the parent is not enough to list a child's xattrs.
  fs.setPermission(path, new FsPermission((short)0704));
  final Path childDir = new Path(path, "child" + pathCount);
  FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
  fs.setXAttr(childDir, name1, "1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.listXAttrs(childDir);
        return null;
      }
    });
    fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }

  // Write access (0702) on the parent is not enough either.
  fs.setPermission(path, new FsPermission((short)0702));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.listXAttrs(childDir);
        return null;
      }
    });
    fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }

  // Execute/scan access (0701) on the parent is sufficient.
  fs.setPermission(path, new FsPermission((short)0701));
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      userFs.listXAttrs(childDir);
      return null;
    }
  });

  // The unprivileged user sees only the user-namespace xattr, while the
  // superuser also sees the trusted-namespace one.
  fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      assertEquals(1, userFs.listXAttrs(childDir).size());
      return null;
    }
  });
  assertEquals(2, fs.listXAttrs(childDir).size());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Verify that xattr access checks honor ACL entries: a user with no access
 * cannot read xattrs, a READ ACL entry permits getXAttrs, and an ALL ACL
 * entry additionally permits setXAttr/removeXAttr.
 */
@Test(timeout=120000) public void testXAttrAcl() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  fs.setOwner(path, BRUCE.getUserName(), null);
  FileSystem fsAsBruce = createFileSystem(BRUCE);
  FileSystem fsAsDiana = createFileSystem(DIANA);
  fsAsBruce.setXAttr(path, name1, value1);

  // Diana has no access yet: reading xattrs must be denied.
  Map<String, byte[]> xattrs;
  try {
    xattrs = fsAsDiana.getXAttrs(path);
    Assert.fail("Diana should not have read access to get xattrs");
  } catch (AccessControlException e) {
    // expected
  }

  // Grant Diana READ via an ACL entry; getXAttrs now succeeds.
  fsAsBruce.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
  xattrs = fsAsDiana.getXAttrs(path);
  Assert.assertArrayEquals(value1, xattrs.get(name1));

  // READ alone does not permit modifying xattrs.
  try {
    fsAsDiana.removeXAttr(path, name1);
    Assert.fail("Diana should not have write access to remove xattrs");
  } catch (AccessControlException e) {
    // expected
  }
  try {
    fsAsDiana.setXAttr(path, name2, value2);
    Assert.fail("Diana should not have write access to set xattrs");
  } catch (AccessControlException e) {
    // expected
  }

  // Grant ALL: set and remove now succeed.
  fsAsBruce.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
  fsAsDiana.setXAttr(path, name2, value2);
  Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
  fsAsDiana.removeXAttr(path, name1);
  fsAsDiana.removeXAttr(path, name2);
}
UtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * start MiniDFScluster, try formatting with different settings
 * @throws IOException
 * @throws InterruptedException
 */
@Test public void testAllowFormat() throws IOException {
  LOG.info("--starting mini cluster");
  // Bring the cluster up with formatting permitted.
  config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  cluster = new MiniDFSCluster.Builder(config)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .build();
  cluster.waitActive();
  assertNotNull(cluster);
  final NameNode nn = cluster.getNameNode();
  assertNotNull(nn);
  LOG.info("Mini cluster created OK");

  // With the allow-format flag off, NameNode.format must refuse to run.
  LOG.info("Verifying format will fail with allowformat false");
  config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, false);
  try {
    cluster.shutdown();
    NameNode.format(config);
    fail("Format succeeded, when it should have failed");
  } catch (IOException e) {
    assertTrue("Exception was not about formatting Namenode",
        e.getMessage().startsWith(
            "The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY));
    LOG.info("Expected failure: " + StringUtils.stringifyException(e));
    LOG.info("Done verifying format will fail with allowformat false");
  }

  // With the flag back on, formatting proceeds normally.
  LOG.info("Verifying format will succeed with allowformat true");
  config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
  NameNode.format(config);
  LOG.info("Done verifying format will succeed with allowformat true");
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Minor test related to HADOOP-9155. Verify that during a
 * FileSystem.setPermission() operation, the stat passed in during the
 * logAuditEvent() call returns the new permission rather than the old
 * permission.
 */
@Test public void testAuditLoggerWithSetPermission() throws IOException {
  // Wire the dummy audit logger into a fresh mini cluster.
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY, DummyAuditLogger.class.getName());
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    final FileSystem fs = cluster.getFileSystem();
    final Path p = new Path("/");
    final long time = System.currentTimeMillis();
    fs.setTimes(p, time, time);
    fs.setPermission(p, new FsPermission(TEST_PERMISSION));
    // The audited stat for setPermission must carry the NEW permission.
    assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
    assertEquals(2, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests that AuditLogger works as expected.
 */
@Test public void testAuditLogger() throws IOException {
  // Register the dummy audit logger and bring up a mini cluster.
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY, DummyAuditLogger.class.getName());
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    // One audited operation should produce exactly one audit event.
    final FileSystem fs = cluster.getFileSystem();
    final long now = System.currentTimeMillis();
    fs.setTimes(new Path("/"), now, now);
    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test public void testGetBlockLocations() throws IOException {
final NamenodeProtocols namenode=cluster.getNameNodeRpc();
final Path p=new Path(BASE_DIR,"file2.dat");
final String src=p.toString();
// Open the file and write half a block so the first (and last) block
// remains under construction.
final FSDataOutputStream out=TestFileCreation.createFile(hdfs,p,3);
int len=BLOCK_SIZE >>> 1;
writeFile(p,out,len);
// Grow the file one block per iteration. After each write, the block
// listing must report exactly i blocks with the last one still under
// construction. Note the loop variable is advanced inside the body
// (see the ++i below), not in the for header.
for (int i=1; i < NUM_BLOCKS; ) {
final LocatedBlocks lb=namenode.getBlockLocations(src,0,len);
final List blocks=lb.getLocatedBlocks();
assertEquals(i,blocks.size());
final Block b=blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
assertTrue(b instanceof BlockInfoUnderConstruction);
// Only write another block if a further iteration will verify it.
if (++i < NUM_BLOCKS) {
writeFile(p,out,BLOCK_SIZE);
len+=BLOCK_SIZE;
}
}
out.close();
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercise cache-pool CRUD: create a pool and verify its attributes via
 * listCachePools, modify every attribute and re-verify, then remove the
 * pool and confirm that removing a missing pool fails.
 */
@Test(timeout=60000) public void testCreateAndModifyPools() throws Exception {
  String poolName = "pool1";
  String ownerName = "abc";
  String groupName = "123";
  FsPermission mode = new FsPermission((short)0755);
  long limit = 150;

  // Create the pool and verify every attribute round-trips.
  dfs.addCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName)
      .setGroupName(groupName).setMode(mode).setLimit(limit));
  RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
  CachePoolInfo info = iter.next().getInfo();
  assertEquals(poolName, info.getPoolName());
  assertEquals(ownerName, info.getOwnerName());
  assertEquals(groupName, info.getGroupName());
  // Also verify mode and limit here, matching the post-modify checks below.
  assertEquals(mode, info.getMode());
  assertEquals(limit, (long)info.getLimit());

  // Modify all attributes and verify the changes are visible.
  ownerName = "def";
  groupName = "456";
  mode = new FsPermission((short)0700);
  limit = 151;
  dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName)
      .setGroupName(groupName).setMode(mode).setLimit(limit));
  iter = dfs.listCachePools();
  info = iter.next().getInfo();
  assertEquals(poolName, info.getPoolName());
  assertEquals(ownerName, info.getOwnerName());
  assertEquals(groupName, info.getGroupName());
  assertEquals(mode, info.getMode());
  assertEquals(limit, (long)info.getLimit());

  // Remove the pool; the listing must then be empty.
  dfs.removeCachePool(poolName);
  iter = dfs.listCachePools();
  assertFalse("expected no cache pools after deleting pool", iter.hasNext());
  proto.listCachePools(null);

  // Removing a pool that never existed fails...
  try {
    proto.removeCachePool("pool99");
    fail("expected to get an exception when "
        + "removing a non-existent pool.");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
  }
  // ...and so does removing the already-removed pool.
  try {
    proto.removeCachePool(poolName);
    fail("expected to get an exception when "
        + "removing a non-existent pool.");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
  }
  iter = dfs.listCachePools();
  assertFalse("expected no cache pools after deleting pool", iter.hasNext());
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=30000) public void testMaxRelativeExpiry() throws Exception {
// A pool's max relative expiry must be non-negative...
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("negative",e);
}
// ...and must not be so large it would overflow.
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
fail("Added a pool with too big of a max expiry.");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("too big",e);
}
// Create a pool with a valid 10-minute max relative expiry and verify
// the value round-trips through listCachePools.
CachePoolInfo coolPool=new CachePoolInfo("coolPool");
final long poolExpiration=1000 * 60 * 10l;
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
RemoteIterator poolIt=dfs.listCachePools();
CachePoolInfo listPool=poolIt.next().getInfo();
assertFalse("Should only be one pool",poolIt.hasNext());
assertEquals("Expected max relative expiry to match set value",poolExpiration,listPool.getMaxRelativeExpiryMs().longValue());
// Invalid expirations are rejected on the existing pool as well:
// negative on add, and beyond RELATIVE_EXPIRY_NEVER on modify.
try {
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
}
catch ( InvalidRequestException e) {
assertExceptionContains("negative",e);
}
try {
dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
fail("Added a pool with too big of a max expiry.");
}
catch ( InvalidRequestException e) {
assertExceptionContains("too big",e);
}
// A directive added without an explicit expiration defaults to the
// pool's max relative expiry (checked within a 10s tolerance).
CacheDirectiveInfo defaultExpiry=new CacheDirectiveInfo.Builder().setPath(new Path("/blah")).setPool(coolPool.getPoolName()).build();
dfs.addCacheDirective(defaultExpiry);
RemoteIterator dirIt=dfs.listCacheDirectives(defaultExpiry);
CacheDirectiveInfo listInfo=dirIt.next().getInfo();
assertFalse("Should only have one entry in listing",dirIt.hasNext());
long listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
assertTrue("Directive expiry should be approximately the pool's max expiry",Math.abs(listExpiration - poolExpiration) < 10 * 1000);
// Adding or modifying a directive with an expiration (relative or
// absolute) past the pool's max must be rejected.
CacheDirectiveInfo.Builder builder=new CacheDirectiveInfo.Builder().setPath(new Path("/lolcat")).setPool(coolPool.getPoolName());
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
fail("Added a directive that exceeds pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
fail("Added a directive that exceeds pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
// Gigantic expirations are rejected outright. The relative form fails
// client-side (IllegalArgumentException), the absolute form server-side.
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build());
fail("Added a directive with a gigantic max value");
}
catch ( IllegalArgumentException e) {
assertExceptionContains("is too far in the future",e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
fail("Added a directive with a gigantic max value");
}
catch ( InvalidRequestException e) {
assertExceptionContains("is too far in the future",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.NEVER).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("is too far in the future",e);
}
// Moving the directive to a pool with a smaller max expiry fails unless
// the directive's expiration is reduced to fit in the same call.
CachePoolInfo destPool=new CachePoolInfo("destPool");
dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).build());
fail("Modified a directive to a pool with a lower max expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).setExpiration(Expiration.newRelative(poolExpiration / 2)).build());
dirIt=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool(destPool.getPoolName()).build());
listInfo=dirIt.next().getInfo();
listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
assertTrue("Unexpected relative expiry " + listExpiration + " expected approximately "+ poolExpiration / 2,Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
// Raise the destination pool's max expiry to RELATIVE_EXPIRY_NEVER;
// the directive can then use relative expirations up to NEVER.
dfs.modifyCachePool(destPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
poolIt=dfs.listCachePools();
listPool=poolIt.next().getInfo();
while (!listPool.getPoolName().equals(destPool.getPoolName())) {
listPool=poolIt.next().getInfo();
}
assertEquals("Expected max relative expiry to match set value",CachePoolInfo.RELATIVE_EXPIRY_NEVER,listPool.getMaxRelativeExpiryMs().longValue());
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER)).build());
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1)).build());
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test cache pool byte limits: negative limits are rejected, directives
 * beyond a pool's remaining capacity are refused unless CacheFlag.FORCE
 * is passed, and overlimit bytes are reported via the pool stats.
 */
@Test(timeout=120000) public void testLimit() throws Exception {
  // A negative limit is invalid.
  try {
    dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
    fail("Should not be able to set a negative limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative", e);
  }

  // A pool one byte too small cannot cache a two-block file.
  final String destiny = "poolofdestiny";
  final Path path1 = new Path("/destiny");
  DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short)1, 0x9494);
  final CachePoolInfo poolInfo =
      new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
  dfs.addCachePool(poolInfo);
  final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder()
      .setPool(destiny).setPath(path1).build();
  try {
    dfs.addCacheDirective(info1);
    fail("Should not be able to cache when there is no more limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }

  // Raising the limit to exactly the file size makes the directive fit.
  poolInfo.setLimit(2 * BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  long id1 = dfs.addCacheDirective(info1);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo,
      "testLimit:1");

  // A second file does not fit without FORCE.
  final Path path2 = new Path("/failure");
  DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short)1, 0x9495);
  try {
    dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path2)
            .build(),
        EnumSet.noneOf(CacheFlag.class));
    fail("Should not be able to add another cached file");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }

  // Lowering the limit below the cached bytes reports overlimit bytes.
  poolInfo.setLimit(BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo, "testLimit:2");
  RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
  assertTrue("Expected a cache pool", it.hasNext());
  CachePoolStats stats = it.next().getStats();
  assertEquals("Overlimit bytes should be difference of needed and limit",
      BLOCK_SIZE, stats.getBytesOverlimit());

  // Moving the directive to an undersized pool fails without FORCE.
  CachePoolInfo inadequate =
      new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
  dfs.addCachePool(inadequate);
  try {
    dfs.modifyCacheDirective(
        new CacheDirectiveInfo.Builder(info1).setId(id1)
            .setPool(inadequate.getPoolName()).build(),
        EnumSet.noneOf(CacheFlag.class));
    // The original test omitted this fail(), so a missing exception
    // would have gone unnoticed.
    fail("Should not be able to move the directive to a pool with "
        + "insufficient capacity");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // With CacheFlag.FORCE, both the move and a further add succeed.
  dfs.modifyCacheDirective(
      new CacheDirectiveInfo.Builder(info1).setId(id1)
          .setPool(inadequate.getPoolName()).build(),
      EnumSet.of(CacheFlag.FORCE));
  dfs.addCacheDirective(
      new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName())
          .setPath(path1).build(),
      EnumSet.of(CacheFlag.FORCE));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that cache pools and directives survive both a secondary-namenode
 * checkpoint and a NameNode restart, and that directive IDs continue to
 * increase monotonically afterwards.
 */
@Test(timeout=60000) public void testCacheManagerRestart() throws Exception {
  SecondaryNameNode secondary = null;
  try {
    // Start a secondary namenode so we can force checkpoints.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        "0.0.0.0:0");
    secondary = new SecondaryNameNode(conf);

    // Create a pool and verify its attributes.
    final String pool = "poolparty";
    String groupName = "partygroup";
    FsPermission mode = new FsPermission((short)0777);
    long limit = 747;
    dfs.addCachePool(new CachePoolInfo(pool).setGroupName(groupName)
        .setMode(mode).setLimit(limit));
    RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
    assertTrue("No cache pools found", pit.hasNext());
    CachePoolInfo info = pit.next().getInfo();
    assertEquals(pool, info.getPoolName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long)info.getLimit());
    assertFalse("Unexpected # of cache pools found", pit.hasNext());

    // Create several directives with a fixed absolute expiration.
    int numEntries = 10;
    String entryPrefix = "/party-";
    long prevId = -1;
    final Date expiry = new Date();
    for (int i = 0; i < numEntries; i++) {
      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
          .setPath(new Path(entryPrefix + i)).setPool(pool)
          .setExpiration(
              CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime()))
          .build());
    }
    RemoteIterator<CacheDirectiveEntry> dit = dfs.listCacheDirectives(null);
    for (int i = 0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
      CacheDirectiveInfo cd = dit.next().getInfo();
      assertEquals(i + 1, cd.getId().longValue());
      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
      assertEquals(pool, cd.getPool());
    }
    assertFalse("Unexpected # of cache directives found", dit.hasNext());

    // Checkpoint, then add state that will only exist in a newer fsimage.
    secondary.doCheckpoint();
    final String imagePool = "imagePool";
    dfs.addCachePool(new CachePoolInfo(imagePool));
    prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/image")).setPool(imagePool).build());

    // Save a fresh namespace image and have the secondary fetch it.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    boolean fetchImage = secondary.doCheckpoint();
    assertTrue("Secondary should have fetched a new fsimage from NameNode",
        fetchImage);
    dfs.removeCachePool(imagePool);

    // Restart the namenode; pool and directives must all survive.
    cluster.restartNameNode();
    pit = dfs.listCachePools();
    assertTrue("No cache pools found", pit.hasNext());
    info = pit.next().getInfo();
    assertEquals(pool, info.getPoolName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long)info.getLimit());
    assertFalse("Unexpected # of cache pools found", pit.hasNext());
    dit = dfs.listCacheDirectives(null);
    for (int i = 0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
      CacheDirectiveInfo cd = dit.next().getInfo();
      assertEquals(i + 1, cd.getId().longValue());
      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
      assertEquals(pool, cd.getPool());
      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
    }
    assertFalse("Unexpected # of cache directives found", dit.hasNext());

    // New directive IDs pick up right where they left off.
    long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/foobar")).setPool(pool).build());
    assertEquals(prevId + 1, nextId);
  } finally {
    if (secondary != null) {
      secondary.shutdown();
    }
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises the full add/list/modify/remove lifecycle of cache directives,
 * including the error paths: unknown pool, mode-0 pool (permission denied),
 * malformed path, empty pool name, and removal of missing or negative IDs.
 */
@Test(timeout=60000) public void testAddRemoveDirectives() throws Exception {
  // Pools 1-3 are world-accessible; pool4 has mode 0 so nobody may add to it.
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short)0)));
  CacheDirectiveInfo alpha=new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
  CacheDirectiveInfo beta=new CacheDirectiveInfo.Builder().setPath(new Path("/beta")).setPool("pool2").build();
  CacheDirectiveInfo delta=new CacheDirectiveInfo.Builder().setPath(new Path("/delta")).setPool("pool1").build();
  long alphaId=addAsUnprivileged(alpha);
  long alphaId2=addAsUnprivileged(alpha);
  // Adding the identical CacheDirectiveInfo twice must yield a distinct ID.
  assertFalse("Expected to get unique directives when re-adding an " + "existing CacheDirectiveInfo",alphaId == alphaId2);
  long betaId=addAsUnprivileged(beta);
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/unicorn")).setPool("no_such_pool").build());
    fail("expected an error when adding to a non-existent pool.");
  }
  catch ( InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool",ioe);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/blackhole")).setPool("pool4").build());
    fail("expected an error when adding to a pool with " + "mode 0 (no permissions for anyone).");
  }
  catch ( AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool",e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/illegal:path/")).setPool("pool1").build());
    fail("expected an error when adding a malformed path " + "to the cache directives.");
  }
  catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename",e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/emptypoolname")).setReplication((short)1).setPool("").build());
    fail("expected an error when adding a cache " + "directive with an empty pool name.");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name",e);
  }
  long deltaId=addAsUnprivileged(delta);
  // Relative paths are accepted and resolved against the working directory.
  long relativeId=addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("relative")).setPool("pool1").build());
  // Parameterized iterator type so next().getInfo() below type-checks;
  // the raw RemoteIterator would make next() return Object.
  RemoteIterator<CacheDirectiveEntry> iter;
  iter=dfs.listCacheDirectives(null);
  validateListAll(iter,alphaId,alphaId2,betaId,deltaId,relativeId);
  // Filtering by a pool with no directives yields an empty listing.
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter,alphaId,alphaId2,deltaId,relativeId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter,betaId);
  // Filtering by ID returns exactly that directive.
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter,alphaId2);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter,relativeId);
  dfs.removeCacheDirective(betaId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  try {
    // Removing an already-removed ID must be rejected.
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID",e);
  }
  try {
    proto.removeCacheDirective(-42L);
    fail("expected an error when removing a negative ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID",e);
  }
  try {
    proto.removeCacheDirective(43L);
    fail("expected an error when removing a non-existent ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID",e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  // Modify the remaining directive's replication and verify it sticks.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(relativeId).setReplication((short)555).build());
  iter=dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified=iter.next().getInfo();
  assertEquals(relativeId,modified.getId().longValue());
  assertEquals((short)555,modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter=dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // "." (current working directory) is also a valid directive path.
  CacheDirectiveInfo directive=new CacheDirectiveInfo.Builder().setPath(new Path(".")).setPool("pool1").build();
  long id=dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive).setId(id).setReplication((short)2).build());
  dfs.removeCacheDirective(id);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that listCachePools redacts pool metadata (owner, group, mode,
 * limit) from a user lacking read access to the pool, and reveals it once
 * that user is made the pool owner.
 */
@Test(timeout=60000) public void testListCachePoolPermissions() throws Exception {
  final UserGroupInformation myUser=UserGroupInformation.createRemoteUser("myuser");
  final DistributedFileSystem myDfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser,conf);
  final String poolName="poolparty";
  // Mode 0700: only the pool owner (the superuser who created it) may read.
  dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short)0700)));
  // Parameterized iterator type so next().getInfo() below type-checks;
  // the raw RemoteIterator would make next() return Object.
  RemoteIterator<CachePoolEntry> it=myDfs.listCachePools();
  CachePoolInfo info=it.next().getInfo();
  assertFalse(it.hasNext());
  // The unprivileged user sees the pool name but no other metadata.
  assertEquals("Expected pool name",poolName,info.getPoolName());
  assertNull("Unexpected owner name",info.getOwnerName());
  assertNull("Unexpected group name",info.getGroupName());
  assertNull("Unexpected mode",info.getMode());
  assertNull("Unexpected limit",info.getLimit());
  final long limit=99;
  // Hand ownership to myuser; the metadata should now be visible to them.
  dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit));
  it=myDfs.listCachePools();
  info=it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name",poolName,info.getPoolName());
  assertEquals("Mismatched owner name",myUser.getShortUserName(),info.getOwnerName());
  assertNotNull("Expected group name",info.getGroupName());
  assertEquals("Mismatched mode",(short)0700,info.getMode().toShort());
  assertEquals("Mismatched limit",limit,(long)info.getLimit());
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Test the importCheckpoint startup option. Verifies:
 * 1. if the NN already contains an image, it will not be allowed
 * to import a checkpoint.
 * 2. if the NN does not contain an image, importing a checkpoint
 * succeeds and re-saves the image
 */
@Test public void testImportCheckpoint() throws Exception {
  Configuration conf=new HdfsConfiguration();
  Path testPath=new Path("/testfile");
  SecondaryNameNode snn=null;
  MiniDFSCluster cluster=null;
  // Parameterized so the for-each over URIs below type-checks; the raw
  // Collection would make its elements Object.
  Collection<URI> nameDirs=null;
  try {
    // Phase 0: create an image plus a secondary checkpoint to import later.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    nameDirs=cluster.getNameDirs(0);
    cluster.getFileSystem().mkdirs(testPath);
    snn=startSecondaryNameNode(conf);
    snn.doCheckpoint();
  }
  finally {
    cleanup(snn);
    cleanup(cluster);
    cluster=null;
  }
  // Phase 1: importing over an existing image must be refused.
  LOG.info("Trying to import checkpoint when the NameNode already " + "contains an image. This should fail.");
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).startupOption(StartupOption.IMPORT).build();
    fail("NameNode did not fail to start when it already contained " + "an image");
  }
  catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("NameNode already contains an image",ioe);
  }
  finally {
    cleanup(cluster);
    cluster=null;
  }
  // Phase 2: wipe the NN storage so the import has a clean target.
  LOG.info("Removing NN storage contents");
  for ( URI uri : nameDirs) {
    File dir=new File(uri.getPath());
    LOG.info("Cleaning " + dir);
    removeAndRecreateDir(dir);
  }
  // Phase 3: import should succeed and restore the checkpointed namespace.
  LOG.info("Trying to import checkpoint");
  try {
    cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0).startupOption(StartupOption.IMPORT).build();
    assertTrue("Path from checkpoint should exist after import",cluster.getFileSystem().exists(testPath));
    // Import re-saves the image, so a checkpoint at txid 3 must exist.
    FSImageTestUtil.assertNNHasCheckpoints(cluster,Ints.asList(3));
  }
  finally {
    cleanup(cluster);
    cluster=null;
  }
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
File currentDir=null;
Configuration conf=new HdfsConfiguration();
File base_dir=new File(MiniDFSCluster.getBaseDirectory());
// Enable automatic restore of failed storage dirs, and split image vs.
// edits into separate directories so only the image dir can be failed.
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,true);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/name-only");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/edits-only");
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,fileAsURI(new File(base_dir,"namesecondary1")).toString());
try {
// manageNameDfsDirs(false) so the cluster honors the dirs set above.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).manageNameDfsDirs(false).build();
secondary=startSecondaryNameNode(conf);
// Baseline checkpoint while all storage dirs are healthy.
secondary.doCheckpoint();
NamenodeProtocols nn=cluster.getNameNodeRpc();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
// Storage dir 0 is expected to be the image-only dir configured above.
StorageDirectory sd0=storage.getStorageDir(0);
assertEquals(NameNodeDirType.IMAGE,sd0.getStorageDirType());
currentDir=sd0.getCurrentDir();
// Simulate the name dir "disappearing" by removing all permissions.
assertEquals(0,FileUtil.chmod(currentDir.getAbsolutePath(),"000"));
try {
// With the only image dir inaccessible, checkpointing must fail.
secondary.doCheckpoint();
fail("Did not fail to checkpoint when there are no valid storage dirs");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("No targets in destination storage",ioe);
}
// Bring the dir back and ask the NN to restore it; rolling the edit log
// triggers the restore before the next checkpoint.
assertEquals(0,FileUtil.chmod(currentDir.getAbsolutePath(),"755"));
nn.restoreFailedStorage("true");
nn.rollEditLog();
secondary.doCheckpoint();
// After restore, a checkpoint at txid 8 should exist and the parallel
// file invariant should hold across NN and secondary storage.
assertNNHasCheckpoints(cluster,ImmutableList.of(8));
assertParallelFilesInvariant(cluster,ImmutableList.of(secondary));
}
finally {
// Always restore permissions so later tests/cleanup can delete the dir.
if (currentDir != null) {
FileUtil.chmod(currentDir.getAbsolutePath(),"755");
}
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test that the secondary doesn't have to re-download image
* if it hasn't changed.
*/
@Test public void testSecondaryImageDownload() throws IOException {
LOG.info("Starting testSecondaryImageDownload");
Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0");
Path dir=new Path("/checkpoint");
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
cluster.waitActive();
FileSystem fileSys=cluster.getFileSystem();
FSImage image=cluster.getNameNode().getFSImage();
SecondaryNameNode secondary=null;
try {
assertTrue(!fileSys.exists(dir));
secondary=startSecondaryNameNode(conf);
File secondaryDir=new File(MiniDFSCluster.getBaseDirectory(),"namesecondary1");
File secondaryCurrent=new File(secondaryDir,"current");
long expectedTxIdToDownload=cluster.getNameNode().getFSImage().getStorage().getMostRecentCheckpointTxId();
File secondaryFsImageBefore=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload));
File secondaryFsImageAfter=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload + 2));
assertFalse("Secondary should start with empty current/ dir " + "but " + secondaryFsImageBefore + " exists",secondaryFsImageBefore.exists());
assertTrue("Secondary should have loaded an image",secondary.doCheckpoint());
assertTrue("Secondary should have downloaded original image",secondaryFsImageBefore.exists());
assertTrue("Secondary should have created a new image",secondaryFsImageAfter.exists());
long fsimageLength=secondaryFsImageBefore.length();
assertEquals("Image size should not have changed",fsimageLength,secondaryFsImageAfter.length());
fileSys.mkdirs(dir);
assertFalse("Another checkpoint should not have to re-load image",secondary.doCheckpoint());
for ( StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
File imageFile=NNStorage.getImageFile(sd,NameNodeFile.IMAGE,expectedTxIdToDownload + 5);
assertTrue("Image size increased",imageFile.length() > fsimageLength);
}
}
finally {
fileSys.close();
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Test that, if the edits dir is separate from the name dir, it is
* properly locked.
*/
@Test public void testSeparateEditsDirLocking() throws IOException {
Configuration conf=new HdfsConfiguration();
File nameDir=new File(MiniDFSCluster.getBaseDirectory(),"name");
File editsDir=new File(MiniDFSCluster.getBaseDirectory(),"testSeparateEditsDirLocking");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsDir.getAbsolutePath());
MiniDFSCluster cluster=null;
StorageDirectory savedSd=null;
try {
cluster=new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false).numDataNodes(0).build();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
for ( StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
assertEquals(editsDir.getAbsoluteFile(),sd.getRoot());
assertLockFails(sd);
savedSd=sd;
}
}
finally {
cleanup(cluster);
cluster=null;
}
assertNotNull(savedSd);
assertClusterStartFailsWhenDirLocked(conf,savedSd);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Starts two namenodes and two secondary namenodes, verifies that secondary
* namenodes are configured correctly to talk to their respective namenodes
* and can do the checkpoint.
* @throws IOException
*/
@Test public void testMultipleSecondaryNamenodes() throws IOException {
Configuration conf=new HdfsConfiguration();
String nameserviceId1="ns1";
String nameserviceId2="ns2";
conf.set(DFSConfigKeys.DFS_NAMESERVICES,nameserviceId1 + "," + nameserviceId2);
MiniDFSCluster cluster=null;
SecondaryNameNode secondary1=null;
SecondaryNameNode secondary2=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).build();
Configuration snConf1=new HdfsConfiguration(cluster.getConfiguration(0));
Configuration snConf2=new HdfsConfiguration(cluster.getConfiguration(1));
InetSocketAddress nn1RpcAddress=cluster.getNameNode(0).getNameNodeAddress();
InetSocketAddress nn2RpcAddress=cluster.getNameNode(1).getNameNodeAddress();
String nn1=nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
String nn2=nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"");
snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"");
snConf1.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId1),nn1);
snConf2.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId2),nn2);
secondary1=startSecondaryNameNode(snConf1);
secondary2=startSecondaryNameNode(snConf2);
assertEquals(secondary1.getNameNodeAddress().getPort(),nn1RpcAddress.getPort());
assertEquals(secondary2.getNameNodeAddress().getPort(),nn2RpcAddress.getPort());
assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2.getNameNodeAddress().getPort());
secondary1.doCheckpoint();
secondary2.doCheckpoint();
}
finally {
cleanup(secondary1);
secondary1=null;
cleanup(secondary2);
secondary2=null;
cleanup(cluster);
cluster=null;
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
* Tests the following sequence of events:
* - secondary successfully makes a checkpoint
* - it then fails while trying to upload it
* - it then fails again for the same reason
* - it then tries to checkpoint a third time
*/
@Test public void testCheckpointAfterTwoFailedUploads() throws IOException {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
Configuration conf=new HdfsConfiguration();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
secondary=startSecondaryNameNode(conf);
Mockito.doThrow(new IOException("Injecting failure after rolling edit logs")).when(faultInjector).afterSecondaryCallsRollEditLog();
try {
secondary.doCheckpoint();
fail("Should have failed upload");
}
catch ( IOException ioe) {
LOG.info("Got expected failure",ioe);
assertTrue(ioe.toString().contains("Injecting failure"));
}
try {
secondary.doCheckpoint();
fail("Should have failed upload");
}
catch ( IOException ioe) {
LOG.info("Got expected failure",ioe);
assertTrue(ioe.toString().contains("Injecting failure"));
}
finally {
Mockito.reset(faultInjector);
}
secondary.doCheckpoint();
}
finally {
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
* Test case where the name node is reformatted while the secondary namenode
* is running. The secondary should shut itself down if if talks to a NN
* with the wrong namespace.
*/
@Test public void testReformatNNBetweenCheckpoints() throws IOException {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
Configuration conf=new HdfsConfiguration();
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,1);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
int origPort=cluster.getNameNodePort();
int origHttpPort=cluster.getNameNode().getHttpAddress().getPort();
Configuration snnConf=new Configuration(conf);
File checkpointDir=new File(MiniDFSCluster.getBaseDirectory(),"namesecondary");
snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointDir.getAbsolutePath());
secondary=startSecondaryNameNode(snnConf);
secondary.doCheckpoint();
cluster.shutdown();
cluster=null;
try {
Thread.sleep(100);
}
catch ( InterruptedException ie) {
}
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true).build();
try {
secondary.doCheckpoint();
fail("Should have failed checkpoint against a different namespace");
}
catch ( IOException ioe) {
LOG.info("Got expected failure",ioe);
assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
}
}
finally {
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
* Test that a fault while downloading edits does not prevent future
* checkpointing
*/
@Test(timeout=30000) public void testEditFailureBeforeRename() throws IOException {
Configuration conf=new HdfsConfiguration();
SecondaryNameNode secondary=null;
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fs=cluster.getFileSystem();
secondary=startSecondaryNameNode(conf);
DFSTestUtil.createFile(fs,new Path("tmpfile0"),1024,(short)1,0l);
secondary.doCheckpoint();
Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
DFSTestUtil.createFile(fs,new Path("tmpfile1"),1024,(short)1,0l);
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Injecting failure before edit rename",ioe);
}
Mockito.reset(faultInjector);
for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue("Expected a single tmp edits file in directory " + sd.toString(),tmpEdits.length == 1);
RandomAccessFile randFile=new RandomAccessFile(tmpEdits[0],"rw");
randFile.setLength(0);
randFile.close();
}
secondary.doCheckpoint();
}
finally {
if (secondary != null) {
secondary.shutdown();
}
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
Mockito.reset(faultInjector);
}
}
APIUtilityVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
* Test that the primary NN will not serve any files to a 2NN who doesn't
* share its namespace ID, and also will not accept any files from one.
*/
@Test public void testNamespaceVerifiedOnFileTransfer() throws IOException {
MiniDFSCluster cluster=null;
Configuration conf=new HdfsConfiguration();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
NamenodeProtocols nn=cluster.getNameNodeRpc();
URL fsName=DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(),conf,DFSUtil.getHttpClientScheme(conf)).toURL();
nn.rollEditLog();
RemoteEditLogManifest manifest=nn.getEditLogManifest(1);
RemoteEditLog log=manifest.getLogs().get(0);
NNStorage dstImage=Mockito.mock(NNStorage.class);
Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written"))).when(dstImage).getFiles(Mockito.anyObject(),Mockito.anyString());
File mockImageFile=File.createTempFile("image","");
FileOutputStream imageFile=new FileOutputStream(mockImageFile);
imageFile.write("data".getBytes());
imageFile.close();
Mockito.doReturn(mockImageFile).when(dstImage).findImageFile(Mockito.any(NameNodeFile.class),Mockito.anyLong());
Mockito.doReturn(new StorageInfo(1,1,"X",1,NodeType.NAME_NODE).toColonSeparatedString()).when(dstImage).toColonSeparatedString();
try {
TransferFsImage.downloadImageToStorage(fsName,0,dstImage,false);
fail("Storage info was not verified");
}
catch ( IOException ioe) {
String msg=StringUtils.stringifyException(ioe);
assertTrue(msg,msg.contains("but the secondary expected"));
}
try {
TransferFsImage.downloadEditsToStorage(fsName,log,dstImage);
fail("Storage info was not verified");
}
catch ( IOException ioe) {
String msg=StringUtils.stringifyException(ioe);
assertTrue(msg,msg.contains("but the secondary expected"));
}
try {
TransferFsImage.uploadImageFromStorage(fsName,conf,dstImage,NameNodeFile.IMAGE,0);
fail("Storage info was not verified");
}
catch ( IOException ioe) {
String msg=StringUtils.stringifyException(ioe);
assertTrue(msg,msg.contains("but the secondary expected"));
}
}
finally {
cleanup(cluster);
cluster=null;
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
* Test that, an attempt to lock a storage that is already locked by a nodename,
* logs error message that includes JVM name of the namenode that locked it.
*/
@Test public void testStorageAlreadyLockedErrorMessage() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
StorageDirectory savedSd=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
for ( StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
savedSd=sd;
}
LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(Storage.class));
try {
savedSd.lock();
fail("Namenode should not be able to lock a storage that is already locked");
}
catch ( IOException ioe) {
String lockingJvmName=Path.WINDOWS ? "" : " " + ManagementFactory.getRuntimeMXBean().getName();
String expectedLogMessage="It appears that another namenode" + lockingJvmName + " has already locked the storage directory";
assertTrue("Log output does not contain expected log message: " + expectedLogMessage,logs.getOutput().contains(expectedLogMessage));
}
}
finally {
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifierBranchVerifierUtilityVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format -force -clusterid option when name
* directory exists. Format should succeed.
* @throws IOException
*/
@Test public void testFormatWithForceAndClusterId() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String myId="testFormatWithForceAndClusterId";
String[] argv={"-format","-force","-clusterid",myId};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cId=getClusterId(config);
assertEquals("ClusterIds do not match",myId,cId);
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format -force options when name directory
* exists. Format should succeed.
* @throws IOException
*/
@Test public void testFormatWithForce() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv={"-format","-force"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -force -nonInteractive -force option. Format
* should succeed.
* @throws IOException
*/
@Test public void testFormatWithNonInteractiveAndForce() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv={"-format","-nonInteractive","-force"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
BranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format option when a non empty name directory
* exists. Enter N when prompted and format should be aborted.
* @throws IOException
* @throws InterruptedException
*/
@Test public void testFormatWithoutForceEnterNo() throws IOException, InterruptedException {
File data=new File(hdfsDir,"file");
if (!data.mkdirs()) {
fail("Failed to create dir " + data.getPath());
}
InputStream origIn=System.in;
ByteArrayInputStream bins=new ByteArrayInputStream("N\n".getBytes());
System.setIn(bins);
String[] argv={"-format"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should not have succeeded",1,e.status);
}
System.setIn(origIn);
File version=new File(hdfsDir,"current/VERSION");
assertFalse("Check version should not exist",version.exists());
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format option when an empty name directory
* exists. Format should succeed.
* @throws IOException
*/
@Test public void testFormatWithEmptyDir() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv={"-format"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format -nonInteractive options when name
* directory does not exist. Format should succeed.
* @throws IOException
*/
@Test public void testFormatWithNonInteractiveNameDirDoesNotExit() throws IOException {
String[] argv={"-format","-nonInteractive"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format option when a non empty name directory
* exists. Enter Y when prompted and the format should succeed.
* @throws IOException
* @throws InterruptedException
*/
@Test public void testFormatWithoutForceEnterYes() throws IOException, InterruptedException {
File data=new File(hdfsDir,"file");
if (!data.mkdirs()) {
fail("Failed to create dir " + data.getPath());
}
InputStream origIn=System.in;
ByteArrayInputStream bins=new ByteArrayInputStream("Y\n".getBytes());
System.setIn(bins);
String[] argv={"-format"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
System.setIn(origIn);
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format option. Format should succeed.
* @throws IOException
*/
@Test public void testFormat() throws IOException {
String[] argv={"-format"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
BranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format -nonInteractive options when a non empty
* name directory exists. Format should not succeed.
* @throws IOException
*/
@Test public void testFormatWithNonInteractive() throws IOException {
File data=new File(hdfsDir,"file");
if (!data.mkdirs()) {
fail("Failed to create dir " + data.getPath());
}
String[] argv={"-format","-nonInteractive"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have been aborted with exit code 1",1,e.status);
}
File version=new File(hdfsDir,"current/VERSION");
assertFalse("Check version should not exist",version.exists());
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test loading an editlog with gaps. A single editlog directory
* is set up. On of the edit log files is deleted. This should
* fail when selecting the input streams as it will not be able
* to select enough streams to load up to 4*TXNS_PER_ROLL.
* There should be 4*TXNS_PER_ROLL transactions as we rolled 3
* times.
*/
@Test public void testLoadingWithGaps() throws IOException {
File f1=new File(TEST_DIR + "/gaptest0");
List editUris=ImmutableList.of(f1.toURI());
NNStorage storage=setupEdits(editUris,3);
final long startGapTxId=1 * TXNS_PER_ROLL + 1;
final long endGapTxId=2 * TXNS_PER_ROLL;
File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId))) {
return true;
}
return false;
}
}
);
assertEquals(1,files.length);
assertTrue(files[0].delete());
FSEditLog editlog=getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId=1;
try {
editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
fail("Should have thrown exception");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Gap in transactions. Expected to be able to read up until " + "at least txid 40 but unable to find any edit logs containing " + "txid 11",ioe);
}
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test edit log failover. If a single edit log is missing, other
 * edit logs should be used instead.
 */
@Test public void testEditLogFailOverFromMissing() throws IOException {
  File f1=new File(TEST_DIR + "/failover0");
  File f2=new File(TEST_DIR + "/failover1");
  List editUris=ImmutableList.of(f1.toURI(),f2.toURI());
  NNStorage storage=setupEdits(editUris,3);
  // Delete the finalized segment [startErrorTxId, endErrorTxId] from f1;
  // those transactions must then be served from f2 instead.
  final long startErrorTxId=1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId=2 * TXNS_PER_ROLL;
  File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
    @Override public boolean accept( File dir, String name){
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,endErrorTxId));
    }
  });
  assertEquals(1,files.length);
  assertTrue(files[0].delete());
  FSEditLog editlog=getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId=1;
  Collection streams=null;
  try {
    streams=editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
    readAllEdits(streams,startTxId);
  }
  catch ( IOException e) {
    LOG.error("edit log failover didn't work",e);
    fail("Edit log failover didn't work");
  }
  finally {
    // BUGFIX: streams stays null if selectInputStreams throws; the
    // unconditional toArray() then raised an NPE that masked the real
    // assertion failure from fail() above.
    if (streams != null) {
      IOUtils.cleanup(null,streams.toArray(new EditLogInputStream[0]));
    }
  }
}
APIUtilityVerifierUtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test edit log failover from a corrupt edit log. One copy of a
 * finalized segment is corrupted in place; reads must fail over to
 * the intact copy in the second directory.
 */
@Test public void testEditLogFailOverFromCorrupt() throws IOException {
  File f1=new File(TEST_DIR + "/failover0");
  File f2=new File(TEST_DIR + "/failover1");
  List editUris=ImmutableList.of(f1.toURI(),f2.toURI());
  NNStorage storage=setupEdits(editUris,3);
  final long startErrorTxId=1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId=2 * TXNS_PER_ROLL;
  File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
    @Override public boolean accept( File dir, String name){
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,endErrorTxId));
    }
  });
  assertEquals(1,files.length);
  long fileLen=files[0].length();
  LOG.debug("Corrupting Log File: " + files[0] + " len: "+ fileLen);
  // Flip the trailing 4 bytes (the segment checksum area) so the copy in
  // f1 is corrupt. try-with-resources fixes a leak: the RandomAccessFile
  // was never closed if readInt()/writeInt() threw.
  try (RandomAccessFile rwf=new RandomAccessFile(files[0],"rw")) {
    rwf.seek(fileLen - 4);
    int b=rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
  }
  FSEditLog editlog=getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId=1;
  Collection streams=null;
  try {
    streams=editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
    readAllEdits(streams,startTxId);
  }
  catch ( IOException e) {
    LOG.error("edit log failover didn't work",e);
    fail("Edit log failover didn't work");
  }
  finally {
    // BUGFIX: guard against NPE when selectInputStreams itself failed —
    // an NPE here would hide the original assertion failure.
    if (streams != null) {
      IOUtils.cleanup(null,streams.toArray(new EditLogInputStream[0]));
    }
  }
}
APIUtilityVerifierUtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Corrupt the checksum region of every finalized edits file and verify
 * that a NameNode restart fails with a ChecksumException as the cause.
 */
@Test public void testEditChecksum() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  cluster.waitActive();
  fileSys=cluster.getFileSystem();
  final FSNamesystem namesystem=cluster.getNamesystem();
  FSImage fsimage=namesystem.getFSImage();
  final FSEditLog editLog=fsimage.getEditLog();
  // One edit so the segment [1,3] (start, mkdir, end) gets finalized.
  fileSys.mkdirs(new Path("/tmp"));
  Iterator iter=fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
  LinkedList sds=new LinkedList();
  while (iter.hasNext()) {
    sds.add(iter.next());
  }
  editLog.close();
  cluster.shutdown();
  for ( StorageDirectory sd : sds) {
    File editFile=NNStorage.getFinalizedEditsFile(sd,1,3);
    assertTrue(editFile.exists());
    long fileLen=editFile.length();
    LOG.debug("Corrupting Log File: " + editFile + " len: "+ fileLen);
    // BUGFIX: try-with-resources — the RandomAccessFile leaked if any of
    // the seek/read/write calls threw.
    try (RandomAccessFile rwf=new RandomAccessFile(editFile,"rw")) {
      rwf.seek(fileLen - 4);
      int b=rwf.readInt();
      rwf.seek(fileLen - 4);
      rwf.writeInt(b + 1);
    }
  }
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
    // BUGFIX: if startup unexpectedly succeeds, shut the cluster down
    // before failing the test so we don't leak a running MiniDFSCluster.
    cluster.shutdown();
    fail("should not be able to start");
  }
  catch ( IOException e) {
    assertNotNull("Cause of exception should be ChecksumException",e.getCause());
    assertEquals("Cause of exception should be ChecksumException",ChecksumException.class,e.getCause().getClass());
  }
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * After one successful edit, knock out both edits directories on the
 * write path and verify the next edit aborts the NameNode via
 * ExitException with the expected diagnostic message.
 */
@Test public void testAllEditsDirFailOnWrite() throws IOException {
  assertTrue(doAnEdit());
  // Invalidate every journal for writes (both flush stages fail).
  for (int idx=0; idx < 2; idx++) {
    invalidateEditsDirAtIndex(idx,true,true);
  }
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, " + " should have halted the NN");
  }
  catch ( RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage due to " + "No journals available to flush. " + "Unsynced transactions: 1",re);
  }
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Mark the first name dir as a required journal, fail it, and verify
 * that a single required-journal failure halts the NameNode — and that
 * the non-required journal is never even asked to setReadyToFlush.
 */
@Test public void testSingleRequiredFailedEditsDirOnSetReadyToFlush() throws IOException {
  String[] nameDirs=cluster.getConfiguration(0).getTrimmedStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  shutDownMiniCluster();
  // Restart with dir 0 required, and minimums relaxed so only the
  // "required" constraint can trip.
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,nameDirs[0]);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,0);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,0);
  setUpMiniCluster(conf,true);
  assertTrue(doAnEdit());
  // Fail the required journal only.
  invalidateEditsDirAtIndex(0,false,false);
  JournalAndStream otherJas=getJournalAndStream(1);
  EditLogFileOutputStream otherSpy=spyOnStream(otherJas);
  assertTrue(otherJas.isActive());
  try {
    doAnEdit();
    fail("A single failure of a required journal should have halted the NN");
  }
  catch ( RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains("setReadyToFlush failed for required journal",re);
  }
  // The surviving journal must not have been flushed and is now inactive.
  Mockito.verify(otherSpy,Mockito.never()).setReadyToFlush();
  assertFalse(otherJas.isActive());
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Fail both journals at the flush stage (setReadyToFlush succeeds,
 * flush fails) and verify the NameNode halts on the next edit.
 */
@Test public void testAllEditsDirsFailOnFlush() throws IOException {
  assertTrue(doAnEdit());
  for (int idx=0; idx < 2; idx++) {
    invalidateEditsDirAtIndex(idx,true,false);
  }
  try {
    doAnEdit();
    fail("The previous edit could not be synced to any persistent storage, " + "should have halted the NN");
  }
  catch ( RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage. " + "Unsynced transactions: 1",re);
  }
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * With four redundant name dirs and a minimum of two, fail them one at
 * a time: edits keep succeeding until a third journal dies, at which
 * point the NameNode must halt.
 */
@Test public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush() throws IOException {
  shutDownMiniCluster();
  Configuration conf=new HdfsConfiguration();
  final int dirCount=4;
  String[] dirPaths=new String[dirCount];
  for (int i=0; i < dirCount; i++) {
    File dir=new File(PathUtils.getTestDir(getClass()),"name-dir" + i);
    dir.mkdirs();
    dirPaths[i]=dir.getAbsolutePath();
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,StringUtils.join(dirPaths,","));
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,2);
  setUpMiniCluster(conf,false);
  // Each iteration: one successful edit, then kill one more journal.
  // With 4 dirs and minimum 2, losing dirs 0..2 leaves only one alive.
  for (int idx=0; idx < 3; idx++) {
    assertTrue(doAnEdit());
    invalidateEditsDirAtIndex(idx,false,false);
  }
  try {
    doAnEdit();
    fail("A failure of more than the minimum number of redundant journals " + "should have halted ");
  }
  catch ( RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage due to " + "setReadyToFlush failed for too many journals. " + "Unsynced transactions: 1",re);
  }
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Tests rolling edit logs while transactions are ongoing.
 * Worker threads continuously submit edits while the main thread
 * repeatedly rolls the log; after each roll the finalized segment is
 * verified and the next in-progress segment must exist.
 */
@Test public void testEditLogRolling() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// First error thrown by a transaction worker, if any; checked each roll.
AtomicReference caughtErr=new AtomicReference();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
StorageDirectory sd=fsimage.getStorage().getStorageDir(0);
// Start background threads that keep issuing namespace edits.
startTransactionWorkers(namesystem,caughtErr);
long previousLogTxId=1;
for (int i=0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
try {
// Give the workers some time to enqueue edits into this segment.
Thread.sleep(20);
}
catch ( InterruptedException e) {
}
LOG.info("Starting roll " + i + ".");
CheckpointSignature sig=namesystem.rollEditLog();
// curSegmentTxId is the first txid of the NEW segment; the segment we
// just finalized covers [previousLogTxId, nextLog - 1].
long nextLog=sig.curSegmentTxId;
String logFileName=NNStorage.getFinalizedEditsFileName(previousLogTxId,nextLog - 1);
// verifyEditLogs returns the number of transactions read, which must
// advance previousLogTxId exactly to the new segment's start.
previousLogTxId+=verifyEditLogs(namesystem,fsimage,logFileName,previousLogTxId);
assertEquals(previousLogTxId,nextLog);
File expectedLog=NNStorage.getInProgressEditsFile(sd,previousLogTxId);
assertTrue("Expect " + expectedLog + " to exist",expectedLog.exists());
}
}
finally {
stopTransactionWorkers();
// Surface any worker-side failure as the test failure.
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * The logSync() method in FSEditLog is unsynchronized while syncing
 * so that other threads can concurrently enqueue edits while the prior
 * sync is ongoing. This test checks that the log is saved correctly
 * if the saveImage occurs while the syncing thread is in the unsynchronized middle section.
 * This replicates the following manual test proposed by Konstantin:
 * I start the name-node in debugger.
 * I do -mkdir and stop the debugger in logSync() just before it does flush.
 * Then I enter safe mode with another client
 * I start saveNamepsace and stop the debugger in
 * FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
 * -> EditLogFileOutputStream.create() ->
 * after truncating the file but before writing LAYOUT_VERSION into it.
 * Then I let logSync() run.
 * Then I terminate the name-node.
 * After that the name-node wont start, since the edits file is broken.
 */
@Test public void testSaveImageWhileSyncInProgress() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
// Replace the journal's output stream with a Mockito spy so flush()
// can be intercepted and stalled below.
JournalAndStream jas=editLog.getJournals().get(0);
EditLogFileOutputStream spyElos=spy((EditLogFileOutputStream)jas.getCurrentStream());
jas.setCurrentStreamForTests(spyElos);
// Holds any exception thrown on the edit thread, re-checked on the
// main thread after each synchronization point.
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterFlush=new CountDownLatch(1);
// Edit thread: performs a single mkdir, whose logSync will hit the
// blocking flush() stub below.
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
// Release the main thread even on failure so it doesn't hang;
// it will see deferredException and fail the test.
waitToEnterFlush.countDown();
}
}
}
;
// flush() stub: only when called from the edit thread, signal the main
// thread that flush was reached and then sleep BLOCK_TIME seconds,
// simulating a sync caught in the unsynchronized middle section.
Answer blockingFlush=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("Flush called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it to flush section...");
waitToEnterFlush.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to flush. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("Flush complete");
return null;
}
}
;
doAnswer(blockingFlush).when(spyElos).flush();
doAnEditThread.start();
LOG.info("Main thread: waiting to enter flush...");
waitToEnterFlush.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync is in unsynchronized section.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Entering safe mode must have waited for the in-flight sync, i.e.
// roughly BLOCK_TIME seconds (minus 1s slack for timer imprecision).
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// Segment [1,3] was finalized by saveNamespace; segment starting at 4
// is the new in-progress one containing a single transaction.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Most of the FSNamesystem methods have a synchronized section where they
 * update the name system itself and write to the edit log, and then
 * unsynchronized, they call logSync. This test verifies that, if an
 * operation has written to the edit log but not yet synced it,
 * we wait for that sync before entering safe mode.
 */
@Test public void testSaveRightBeforeSync() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
// Spy on the whole edit log so logSync() itself can be stalled (unlike
// testSaveImageWhileSyncInProgress, which stalls flush()).
FSEditLog editLog=spy(fsimage.getEditLog());
fsimage.editLog=editLog;
// Holds any exception thrown on the edit thread.
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterSync=new CountDownLatch(1);
// Edit thread: a single mkdir whose logSync call hits the blocking
// stub below.
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
// Release the main thread even on failure so it doesn't hang.
waitToEnterSync.countDown();
}
}
}
;
// logSync stub: when invoked from the edit thread, signal the main
// thread and sleep BLOCK_TIME seconds before the real sync, leaving an
// edit written-but-unsynced during the sleep.
Answer blockingSync=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("logSync called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it just before logSync...");
waitToEnterSync.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to logSync. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("logSync complete");
return null;
}
}
;
doAnswer(blockingSync).when(editLog).logSync();
doAnEditThread.start();
LOG.info("Main thread: waiting to just before logSync...");
waitToEnterSync.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync about to be called.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Safe-mode entry must have waited out the stalled sync (~BLOCK_TIME
// seconds; 1s slack for timer imprecision).
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// Segment [1,3] finalized by saveNamespace; in-progress segment starts
// at txid 4 with one transaction.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
IterativeVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises error handling of setINodeXAttrs when adding/replacing
 * multiple xattrs in one call: duplicate entries, CREATE on an existing
 * xattr, REPLACE on a missing xattr, then the successful CREATE,
 * REPLACE, and CREATE|REPLACE paths.
 */
@Test(timeout=300000) public void testXAttrMultiAddRemoveErrors() throws Exception {
List existingXAttrs=Lists.newArrayList();
List toAdd=Lists.newArrayList();
// Duplicate entry (index 0 twice) in a single request must be rejected.
toAdd.add(generatedXAttrs.get(0));
toAdd.add(generatedXAttrs.get(1));
toAdd.add(generatedXAttrs.get(2));
toAdd.add(generatedXAttrs.get(0));
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
fail("Specified the same xattr to be set twice");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Cannot specify the same " + "XAttr to be set",e);
}
// Remove one duplicate and pretend xattr 0 already exists on the inode:
// CREATE without REPLACE must now fail on the pre-existing xattr.
toAdd.remove(generatedXAttrs.get(0));
existingXAttrs.add(generatedXAttrs.get(0));
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
fail("Set XAttr that is already set without REPLACE flag");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("already exists",e);
}
// REPLACE alone must fail for xattrs (1 and 2) that don't exist yet.
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
fail("Set XAttr that does not exist without the CREATE flag");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("does not exist",e);
}
// Happy path: CREATE the two remaining new xattrs (1 and 2).
// NOTE(review): remove() of xattr 0 here is a no-op — it was already
// removed above — so toAdd still holds exactly 2 entries.
toAdd.remove(generatedXAttrs.get(0));
List newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
assertEquals("Unexpected toAdd size",2,toAdd.size());
for ( XAttr x : toAdd) {
assertTrue("Did not find added XAttr " + x,newXAttrs.contains(x));
}
// REPLACE existing xattrs a0..a2 with new values (i*2 each).
existingXAttrs=newXAttrs;
toAdd=Lists.newArrayList();
for (int i=0; i < 3; i++) {
XAttr xAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a" + i).setValue(new byte[]{(byte)(i * 2)}).build();
toAdd.add(xAttr);
}
newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
assertEquals("Unexpected number of new XAttrs",3,newXAttrs.size());
for (int i=0; i < 3; i++) {
assertArrayEquals("Unexpected XAttr value",new byte[]{(byte)(i * 2)},newXAttrs.get(i).getValue());
}
// CREATE|REPLACE: restore the original generated values for a0..a3.
existingXAttrs=newXAttrs;
toAdd=Lists.newArrayList();
for (int i=0; i < 4; i++) {
toAdd.add(generatedXAttrs.get(i));
}
newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
verifyXAttrsPresent(newXAttrs,4);
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the per-inode xattr limit: SYSTEM/RAW-namespace xattrs may be
 * added beyond the user-visible limit, but adding a user-visible
 * (TRUSTED) xattr past the limit must fail.
 */
@Test public void testINodeXAttrsLimit() throws Exception {
  List existingXAttrs=Lists.newArrayListWithCapacity(2);
  XAttr xAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(new byte[]{0x31,0x32,0x33}).build();
  XAttr xAttr2=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a2").setValue(new byte[]{0x31,0x31,0x31}).build();
  existingXAttrs.add(xAttr1);
  existingXAttrs.add(xAttr2);
  // SYSTEM and RAW xattrs don't count against the user-visible limit,
  // so this succeeds even with 2 USER xattrs already present.
  XAttr newSystemXAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3").setValue(new byte[]{0x33,0x33,0x33}).build();
  XAttr newRawXAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW).setName("a3").setValue(new byte[]{0x33,0x33,0x33}).build();
  List newXAttrs=Lists.newArrayListWithCapacity(2);
  newXAttrs.add(newSystemXAttr);
  newXAttrs.add(newRawXAttr);
  List xAttrs=fsdir.setINodeXAttrs(existingXAttrs,newXAttrs,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
  // BUGFIX: arguments were swapped (assertEquals(actual, expected)),
  // which produces a misleading failure message. Expected value first.
  assertEquals(4,xAttrs.size());
  // A user-visible (TRUSTED) xattr must push the inode over the limit.
  XAttr newXAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.TRUSTED).setName("a4").setValue(new byte[]{0x34,0x34,0x34}).build();
  newXAttrs.set(0,newXAttr1);
  try {
    fsdir.setINodeXAttrs(existingXAttrs,newXAttrs,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
    fail("Setting user visible xattr on inode should fail if " + "reaching limit.");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " + "to inode, would exceed limit",e);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test when there is snapshot taken on root.
 * The cluster is restarted (edits replay), checkpointed and restarted
 * (image load), then checked, then checkpointed and restarted again to
 * verify the snapshot state survives every persistence path.
 */
@Test public void testSnapshotOnRoot() throws Exception {
  final Path root=new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root,"s1");
  // Restart #1: snapshot restored from the edit log.
  restartClusterAndReloadHandles();
  saveNamespaceViaSafeMode();
  // Restart #2: snapshot restored from the fsimage written above.
  restartClusterAndReloadHandles();
  INodeDirectory rootNode=fsn.dir.getINode4Write(root.toString()).asDirectory();
  assertTrue("The children list of root should be empty",rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
  // Exactly one snapshot diff, belonging to snapshot "s1".
  List diffList=rootNode.getDiffs().asList();
  assertEquals(1,diffList.size());
  Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
  // Root must still be listed as the single snapshottable directory.
  assertEquals(1,fsn.getSnapshotManager().getNumSnapshottableDirs());
  SnapshottableDirectoryStatus[] sdirs=fsn.getSnapshotManager().getSnapshottableDirListing(null);
  assertEquals(root,sdirs[0].getFullPath());
  saveNamespaceViaSafeMode();
  // Restart #3: final sanity restart from the refreshed image.
  restartClusterAndReloadHandles();
}

/** Shuts down the cluster, restarts it without formatting, and refreshes the fsn/hdfs handles. */
private void restartClusterAndReloadHandles() throws Exception {
  cluster.shutdown();
  cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn=cluster.getNamesystem();
  hdfs=cluster.getFileSystem();
}

/** Writes a checkpoint by entering safe mode, saving the namespace, and leaving safe mode. */
private void saveNamespaceViaSafeMode() throws Exception {
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them: with 10 rolls (10 finalized segments of 10 txns each plus one
 * in-progress segment), only 100 transactions are visible.
 */
@Test public void testExcludeInProgressStreams() throws CorruptionException, IOException {
  File storageDir=new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
  NNStorage storage=setupEdits(Collections.singletonList(storageDir.toURI()),10,false);
  StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
  FileJournalManager jm=new FileJournalManager(conf,sd,storage);
  // inProgressOk=false: only the 100 finalized transactions count.
  assertEquals(100,getNumberOfTransactions(jm,1,false,false));
  EditLogInputStream stream=getJournalInputStream(jm,90,false);
  try {
    // Reading from txid 90 onward must never surface an in-progress txid.
    for (FSEditLogOp op=stream.readOp(); op != null; op=stream.readOp()) {
      assertTrue(op.getTransactionId() <= 100);
    }
  }
  finally {
    IOUtils.cleanup(LOG,stream);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that we receive the correct number of transactions when we count
 * the number of transactions around gaps.
 * Set up a single edits directory, with no failures. Delete the 4th logfile.
 * Test that getNumberOfTransactions returns the correct number of
 * transactions before this gap and after this gap. Also verify that if you
 * try to count on the gap that an exception is thrown.
 */
@Test public void testManyLogsWithGaps() throws IOException {
  File storageRoot=new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
  NNStorage storage=setupEdits(Collections.singletonList(storageRoot.toURI()),10);
  StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
  // The 4th segment spans [gapStart, gapEnd]; deleting it creates the gap.
  final long gapStart=3 * TXNS_PER_ROLL + 1;
  final long gapEnd=4 * TXNS_PER_ROLL;
  File[] doomed=new File(storageRoot,"current").listFiles(new FilenameFilter(){
    @Override public boolean accept( File dir, String name){
      return name.startsWith(NNStorage.getFinalizedEditsFileName(gapStart,gapEnd));
    }
  });
  assertEquals(1,doomed.length);
  assertTrue(doomed[0].delete());
  FileJournalManager jm=new FileJournalManager(conf,sd,storage);
  // Before the gap: everything up to (but not including) the gap.
  assertEquals(gapStart - 1,getNumberOfTransactions(jm,1,true,true));
  // Starting inside the gap: nothing readable.
  assertEquals(0,getNumberOfTransactions(jm,gapStart,true,true));
  // After the gap: the remaining 11 rolls' worth of transactions.
  assertEquals(11 * TXNS_PER_ROLL - gapEnd,getNumberOfTransactions(jm,gapEnd + 1,true,true));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that the concat operation is properly persisted in the
 * edit log, and properly replayed on restart.
 */
@Test public void testConcatInEditLog() throws Exception {
  final Path TEST_DIR=new Path("/testConcatInEditLog");
  final long FILE_LEN=blockSize;
  // Build three source files plus a target, each one block long.
  Path[] srcFiles=new Path[3];
  for (int idx=0; idx < srcFiles.length; idx++) {
    srcFiles[idx]=new Path(TEST_DIR,"src-" + idx);
    DFSTestUtil.createFile(dfs,srcFiles[idx],FILE_LEN,REPL_FACTOR,1);
  }
  Path targetFile=new Path(TEST_DIR,"target");
  DFSTestUtil.createFile(dfs,targetFile,FILE_LEN,REPL_FACTOR,1);
  dfs.concat(targetFile,srcFiles);
  assertTrue(dfs.exists(targetFile));
  FileStatus origStatus=dfs.getFileStatus(targetFile);
  // Replay the edit log and verify the concat survived: target present,
  // sources gone, modification time preserved.
  cluster.restartNameNode(true);
  assertTrue(dfs.exists(targetFile));
  assertFalse(dfs.exists(srcFiles[0]));
  FileStatus statusAfterRestart=dfs.getFileStatus(targetFile);
  assertEquals(origStatus.getModificationTime(),statusAfterRestart.getModificationTime());
}
IterativeVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Concatenates 10 files into one.
 * Verifies the final size, deletion of the source files, number of
 * blocks, permission enforcement, and that a subsequent concat of a
 * small file also works.
 * @throws IOException
 */
@Test public void testConcat() throws IOException, InterruptedException {
  final int numFiles=10;
  long fileLen=blockSize * 3;
  HdfsFileStatus fStatus;
  FSDataInputStream stm;
  // BUGFIX (idiom): was `new String("/trg")` — a needless copy of a
  // string literal.
  String trg="/trg";
  Path trgPath=new Path(trg);
  DFSTestUtil.createFile(dfs,trgPath,fileLen,REPL_FACTOR,1);
  fStatus=nn.getFileInfo(trg);
  long trgLen=fStatus.getLen();
  long trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
  // Create the source files, remembering length, block list, and content
  // of each so the concatenated result can be verified byte-for-byte.
  Path[] files=new Path[numFiles];
  byte[][] bytes=new byte[numFiles][(int)fileLen];
  LocatedBlocks[] lblocks=new LocatedBlocks[numFiles];
  long[] lens=new long[numFiles];
  for (int i=0; i < files.length; i++) {
    files[i]=new Path("/file" + i);
    Path path=files[i];
    System.out.println("Creating file " + path);
    DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1);
    fStatus=nn.getFileInfo(path.toUri().getPath());
    lens[i]=fStatus.getLen();
    assertEquals(trgLen,lens[i]);
    lblocks[i]=nn.getBlockLocations(path.toUri().getPath(),0,lens[i]);
    stm=dfs.open(path);
    stm.readFully(0,bytes[i]);
    stm.close();
  }
  // A non-owner user must not be allowed to concat.
  final UserGroupInformation user1=UserGroupInformation.createUserForTesting("theDoctor",new String[]{"tardis"});
  DistributedFileSystem hdfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1,conf);
  try {
    hdfs.concat(trgPath,files);
    fail("Permission exception expected");
  }
  catch ( IOException ie) {
    System.out.println("Got expected exception for permissions:" + ie.getLocalizedMessage());
  }
  // The real concat: file count drops by the number of absorbed sources.
  ContentSummary cBefore=dfs.getContentSummary(trgPath.getParent());
  dfs.concat(trgPath,files);
  ContentSummary cAfter=dfs.getContentSummary(trgPath.getParent());
  assertEquals(cBefore.getFileCount(),cAfter.getFileCount() + files.length);
  // Expected totals: target's original length/blocks plus all sources'.
  long totalLen=trgLen;
  long totalBlocks=trgBlocks;
  for (int i=0; i < files.length; i++) {
    totalLen+=lens[i];
    totalBlocks+=lblocks[i].locatedBlockCount();
  }
  System.out.println("total len=" + totalLen + "; totalBlocks="+ totalBlocks);
  fStatus=nn.getFileInfo(trg);
  trgLen=fStatus.getLen();
  stm=dfs.open(trgPath);
  byte[] byteFileConcat=new byte[(int)trgLen];
  stm.readFully(0,byteFileConcat);
  stm.close();
  trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
  // BUGFIX: expected value goes first in assertEquals for correct
  // failure messages (was assertEquals(actual, expected)).
  assertEquals(totalBlocks,trgBlocks);
  assertEquals(totalLen,trgLen);
  // Each source must be gone; recreate them for any follow-on checks.
  for ( Path p : files) {
    fStatus=nn.getFileInfo(p.toUri().getPath());
    assertNull("File " + p + " still exists",fStatus);
    DFSTestUtil.createFile(dfs,p,fileLen,REPL_FACTOR,1);
  }
  checkFileContent(byteFileConcat,bytes);
  // Concat a small (sub-block) file and verify length/block accounting.
  Path smallFile=new Path("/sfile");
  int sFileLen=10;
  DFSTestUtil.createFile(dfs,smallFile,sFileLen,REPL_FACTOR,1);
  dfs.concat(trgPath,new Path[]{smallFile});
  fStatus=nn.getFileInfo(trg);
  trgLen=fStatus.getLen();
  trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
  assertEquals(totalBlocks + 1,trgBlocks);
  assertEquals(totalLen + sFileLen,trgLen);
}
TestInitializerInternalCallVerifierNullVerifierHybridVerifier
/** Spins up a MiniDFSCluster before each test and caches the FileSystem and NameNode RPC handles. */
@Before public void startUpCluster() throws IOException {
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
  assertNotNull("Failed Cluster Creation",cluster);
  cluster.waitClusterUp();
  // Cache the client-side and RPC-side handles used by the tests.
  dfs=cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem",dfs);
  nn=cluster.getNameNodeRpc();
  assertNotNull("Failed to get NameNode",nn);
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Test for the static {@link INodeFile#valueOf(INode,String)}and {@link INodeFileUnderConstruction#valueOf(INode,String)} methods.
 * Covers four inputs: null, a regular file, a file under construction,
 * and a directory — each against both valueOf variants.
 * @throws IOException
 */
@Test public void testValueOf() throws IOException {
  final String path="/testValueOf";
  final short replication=3;
  {
    // Case 1: null inode — both variants must throw FileNotFoundException.
    final INode nullINode=null;
    try {
      INodeFile.valueOf(nullINode,path);
      fail();
    }
    catch ( FileNotFoundException fnfe) {
      assertTrue(fnfe.getMessage().contains("File does not exist"));
    }
    try {
      INodeDirectory.valueOf(nullINode,path);
      fail();
    }
    catch ( FileNotFoundException e) {
      assertTrue(e.getMessage().contains("Directory does not exist"));
    }
  }
  {
    // Case 2: regular file — INodeFile.valueOf returns the same object,
    // INodeDirectory.valueOf rejects it.
    final INode fileINode=createINodeFile(replication,preferredBlockSize);
    assertTrue(INodeFile.valueOf(fileINode,path) == fileINode);
    try {
      INodeDirectory.valueOf(fileINode,path);
      fail();
    }
    catch ( PathIsNotDirectoryException e) {
    }
  }
  {
    // Case 3: file under construction behaves like a regular file.
    final INode ucINode=new INodeFile(INodeId.GRANDFATHER_INODE_ID,null,perm,0L,0L,null,replication,1024L);
    ucINode.asFile().toUnderConstruction("client","machine");
    assertTrue(INodeFile.valueOf(ucINode,path) == ucINode);
    try {
      INodeDirectory.valueOf(ucINode,path);
      fail();
    }
    catch ( PathIsNotDirectoryException expected) {
    }
  }
  {
    // Case 4: directory — INodeFile.valueOf rejects it,
    // INodeDirectory.valueOf returns the same object.
    final INode dirINode=new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,null,perm,0L);
    try {
      INodeFile.valueOf(dirINode,path);
      fail();
    }
    catch ( FileNotFoundException fnfe) {
      assertTrue(fnfe.getMessage().contains("Path is not a file"));
    }
    assertTrue(INodeDirectory.valueOf(dirINode,path) == dirINode);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierHybridVerifier
/**
 * Test whether the inode in inodeMap has been replaced after regular inode
 * replacement (setting and clearing a quota swaps the directory inode).
 */
@Test public void testInodeReplacement() throws Exception {
  final Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs=cluster.getFileSystem();
    final FSDirectory fsdir=cluster.getNamesystem().getFSDirectory();
    final Path dir=new Path("/dir");
    hdfs.mkdirs(dir);
    // Baseline: the tree inode and the inodeMap entry are one object.
    INodeDirectory treeNode=getDir(fsdir,dir);
    assertSame(treeNode,fsdir.getInode(treeNode.getId()));
    // Setting a quota replaces the inode; the map must track the new one.
    hdfs.setQuota(dir,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
    treeNode=getDir(fsdir,dir);
    assertTrue(treeNode.isWithQuota());
    assertSame(treeNode,fsdir.getInode(treeNode.getId()));
    // Clearing the quota replaces it again; the map must still agree.
    hdfs.setQuota(dir,-1,-1);
    treeNode=getDir(fsdir,dir);
    assertSame(treeNode,fsdir.getInode(treeNode.getId()));
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test public void testGetFullPathNameAfterSetQuota() throws Exception {
  final long fileLen=1024;
  replication=3;
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn=cluster.getNamesystem();
    FSDirectory fsdir=fsn.getFSDirectory();
    DistributedFileSystem dfs=cluster.getFileSystem();
    // Create /dir/file and confirm the child resolves its full path.
    final Path dir=new Path("/dir");
    final Path file=new Path(dir,"file");
    DFSTestUtil.createFile(dfs,file,fileLen,replication,0L);
    INode childINode=fsdir.getINode(file.toString());
    assertEquals(file.toString(),childINode.getFullPathName());
    // Setting a quota swaps the parent inode for a quota-aware one; the
    // child's full path must still resolve through the new parent.
    dfs.setQuota(dir,Long.MAX_VALUE - 1,replication * fileLen * 10);
    INodeDirectory quotaDirNode=getDir(fsdir,dir);
    assertEquals(dir.toString(),quotaDirNode.getFullPathName());
    assertTrue(quotaDirNode.isWithQuota());
    // Renaming the parent must also be reflected in the child's path.
    final Path newDir=new Path("/newdir");
    final Path newFile=new Path(newDir,"file");
    dfs.rename(dir,newDir,Options.Rename.OVERWRITE);
    childINode=fsdir.getINode(newFile.toString());
    assertEquals(newFile.toString(),childINode.getFullPathName());
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
 * This test tries to simulate failure scenarios.
 * 1. Start cluster with shared name and edits dir
 * 2. Restart cluster by adding separate name and edits dirs
 * 3. Restart cluster by removing shared name and edits dir
 * 4. Restart cluster with old shared name and edits dir, but only latest
 * name dir. This should fail since we don't have latest edits dir
 * 5. Restart cluster with old shared name and edits dir, but only latest
 * edits dir. This should succeed since the latest edits will have
 * segments leading all the way from the image in name_and_edits.
 */
@Test public void testNameEditsConfigsFailure() throws IOException {
Path file1=new Path("TestNameEditsConfigs1");
Path file2=new Path("TestNameEditsConfigs2");
Path file3=new Path("TestNameEditsConfigs3");
MiniDFSCluster cluster=null;
Configuration conf=null;
FileSystem fileSys=null;
File nameOnlyDir=new File(base_dir,"name");
File editsOnlyDir=new File(base_dir,"edits");
File nameAndEditsDir=new File(base_dir,"name_and_edits");
// Phase 1: name and edits share a single directory.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
cluster.waitActive();
assertTrue(new File(nameAndEditsDir,"current/VERSION").exists());
fileSys=cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
DFSTestUtil.createFile(fileSys,file1,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file1,replication);
}
finally {
// FIX: null-guard both handles; if cluster startup threw, fileSys/cluster
// were never assigned and the original code NPE'd here, masking the
// actual startup failure.
if (fileSys != null) {
fileSys.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
// Phase 2: add a separate name-only dir and edits-only dir alongside the
// shared dir; all three must get a current/VERSION and file1 must survive.
conf=new HdfsConfiguration();
assertTrue(nameOnlyDir.mkdir());
assertTrue(editsOnlyDir.mkdir());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath() + "," + nameOnlyDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath() + "," + editsOnlyDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
assertTrue(new File(nameAndEditsDir,"current/VERSION").exists());
assertTrue(new File(nameOnlyDir,"current/VERSION").exists());
assertTrue(new File(editsOnlyDir,"current/VERSION").exists());
fileSys=cluster.getFileSystem();
assertTrue(fileSys.exists(file1));
checkFile(fileSys,file1,replication);
cleanupFile(fileSys,file1);
DFSTestUtil.createFile(fileSys,file2,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file2,replication);
}
finally {
if (fileSys != null) {
fileSys.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
// Phase 3: drop the shared dir entirely; restart from the separate dirs.
try {
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameOnlyDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsOnlyDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
assertFalse(fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys,file2,replication);
cleanupFile(fileSys,file2);
DFSTestUtil.createFile(fileSys,file3,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file3,replication);
}
finally {
if (fileSys != null) {
fileSys.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
// Phase 4: stale shared dir + latest name dir, but NOT the latest edits
// dir -- startup must fail because the newest edit segments are missing.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
fail("Successfully started cluster but should not have been able to.");
}
catch ( IOException e) {
LOG.info("EXPECTED: cluster start failed due to missing " + "latest edits dir",e);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
cluster=null;
}
// Phase 5: stale shared dir + latest edits dir -- this must succeed since
// the edits carry segments all the way from the stale image.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
fileSys=cluster.getFileSystem();
assertFalse(fileSys.exists(file1));
assertFalse(fileSys.exists(file2));
assertTrue(fileSys.exists(file3));
checkFile(fileSys,file3,replication);
cleanupFile(fileSys,file3);
DFSTestUtil.createFile(fileSys,file3,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file3,replication);
}
finally {
if (fileSys != null) {
fileSys.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test for createSnapshot/renameSnapshot/deleteSnapshot: retried invocations
 * within the same RPC call id must be served idempotently (same result, no
 * error), while a genuinely new call repeating the operation must fail.
 */
@Test public void testSnapshotMethods() throws Exception {
String dir="/testNamenodeRetryCache/testCreateSnapshot/src";
resetCall();
namesystem.mkdirs(dir,perm,true);
namesystem.allowSnapshot(dir);
// NOTE(review): newCall() appears to start a fresh simulated RPC call id so
// the following repeats count as retries -- confirm helper semantics.
newCall();
String name=namesystem.createSnapshot(dir,"snap1");
// Retries of the same call return the identical cached result.
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
newCall();
// A NEW call creating the already-existing snapshot must fail.
try {
namesystem.createSnapshot(dir,"snap1");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
}
// Same pattern for rename: retries succeed, a new duplicate call fails
// (snap1 no longer exists after the first rename).
newCall();
namesystem.renameSnapshot(dir,"snap1","snap2");
namesystem.renameSnapshot(dir,"snap1","snap2");
namesystem.renameSnapshot(dir,"snap1","snap2");
newCall();
try {
namesystem.renameSnapshot(dir,"snap1","snap2");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
}
// Same pattern for delete: retries succeed, a new duplicate call fails.
newCall();
namesystem.deleteSnapshot(dir,"snap2");
namesystem.deleteSnapshot(dir,"snap2");
namesystem.deleteSnapshot(dir,"snap2");
newCall();
try {
namesystem.deleteSnapshot(dir,"snap2");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * After run a set of operations, restart NN and check if the retry cache has
 * been rebuilt based on the editlog.
 */
@Test public void testRetryCacheRebuild() throws Exception {
DFSTestUtil.runOperations(cluster,filesystem,conf,BlockSize,0);
LightWeightCache cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
// 23 = number of cacheable operations performed by runOperations above;
// keep in sync if that helper changes.
assertEquals(23,cacheSet.size());
// Snapshot all current cache entries so they can be compared post-restart.
Map oldEntries=new HashMap();
Iterator iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
oldEntries.put(entry,entry);
}
// Restart: the retry cache is not persisted directly, so it must be
// reconstructed from the edit log during startup.
cluster.restartNameNode();
cluster.waitActive();
namesystem=cluster.getNamesystem();
assertTrue(namesystem.hasRetryCache());
cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Every rebuilt entry must match one captured before the restart.
iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test for create file: retried startFile calls with the same call id
 * return the cached HdfsFileStatus; a new call recreating the file fails.
 */
@Test public void testCreate() throws Exception {
String src="/testNamenodeRetryCache/testCreate/file";
newCall();
HdfsFileStatus status=namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null);
// Retries within the same call id must return the identical cached status.
Assert.assertEquals(status,namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null));
Assert.assertEquals(status,namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null));
// A genuinely new call creating the same path must throw.
newCall();
try {
namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null);
Assert.fail("testCreate - expected exception is not thrown");
}
catch ( IOException e) {
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test for append: retried appendFile calls with the same call id return
 * the cached LocatedBlock; a new call appending again fails (lease held).
 */
@Test public void testAppend() throws Exception {
String src="/testNamenodeRetryCache/testAppend/src";
resetCall();
DFSTestUtil.createFile(filesystem,new Path(src),128,(short)1,0L);
newCall();
LocatedBlock b=namesystem.appendFile(src,"holder","clientMachine");
// Retries within the same call id must return the identical cached block.
Assert.assertEquals(b,namesystem.appendFile(src,"holder","clientMachine"));
Assert.assertEquals(b,namesystem.appendFile(src,"holder","clientMachine"));
// A genuinely new append call on the same open file must throw.
newCall();
try {
namesystem.appendFile(src,"holder","clientMachine");
Assert.fail("testAppend - expected exception is not thrown");
}
catch ( Exception e) {
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Validates NN startup handling of xattr configuration: negative size or
 * per-inode limits are rejected, and a size of 0 is logged as "unlimited".
 */
@Test(timeout=120000) public void testXattrConfiguration() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
// Case 1: a negative max xattr size must be rejected at NN startup.
try {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,-1);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
fail("Expected exception with negative xattr size");
}
catch ( IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot set a negative value for the maximum size of an xattr",e);
}
finally {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
// Case 2: a negative per-inode xattr limit must likewise be rejected.
try {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,-1);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
fail("Expected exception with negative # xattrs per inode");
}
catch ( IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot set a negative limit on the number of xattrs per inode",e);
}
finally {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
// Case 3: size 0 means "unlimited" and must be logged as such at startup.
final LogVerificationAppender appender=new LogVerificationAppender();
final Logger logger=Logger.getRootLogger();
logger.addAppender(appender);
try {
int count=appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
assertEquals("Expected no messages about unlimited xattr size",0,count);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,0);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
count=appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
assertEquals("Expected unlimited xattr size",2,count);
}
finally {
// FIX: detach the appender so it does not leak onto the shared root
// logger and skew log-count assertions in subsequently-run tests.
logger.removeAppender(appender);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * This test tests hosts include list contains host names. After namenode
 * restarts, the still alive datanodes should not have any trouble in getting
 * registrant again.
 */
@Test public void testNNRestart() throws IOException, InterruptedException {
MiniDFSCluster cluster=null;
FileSystem localFileSys;
Path hostsFile;
Path excludeFile;
int HEARTBEAT_INTERVAL=1;
// Write an empty exclude file and a hosts file containing the hostname
// for 127.0.0.1 so the single DN is admitted by name, not by IP.
localFileSys=FileSystem.getLocal(config);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/work-dir/restartnn");
hostsFile=new Path(dir,"hosts");
excludeFile=new Path(dir,"exclude");
config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
writeConfigFile(localFileSys,excludeFile,null);
config.set(DFSConfigKeys.DFS_HOSTS,hostsFile.toUri().getPath());
ArrayList list=new ArrayList();
byte b[]={127,0,0,1};
InetAddress inetAddress=InetAddress.getByAddress(b);
list.add(inetAddress.getHostName());
writeConfigFile(localFileSys,hostsFile,list);
int numDatanodes=1;
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).setupHostsFile(true).build();
cluster.waitActive();
// Restart the NN; the surviving DN must re-register successfully.
cluster.restartNameNode();
NamenodeProtocols nn=cluster.getNameNodeRpc();
assertNotNull(nn);
assertTrue(cluster.isDataNodeUp());
// Poll for up to 5 heartbeat intervals for the DN to show up as live.
DatanodeInfo[] info=nn.getDatanodeReport(DatanodeReportType.LIVE);
for (int i=0; i < 5 && info.length != numDatanodes; i++) {
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
}
assertEquals("Number of live nodes should be " + numDatanodes,numDatanodes,info.length);
}
catch ( IOException e) {
fail(StringUtils.stringifyException(e));
throw e;
}
finally {
cleanupFile(localFileSys,excludeFile.getParent());
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Regression test for HDFS-1997. Test that, if an exception
 * occurs on the client side, it is properly reported as such,
 * and reported to the associated NNStorage object.
 */
@Test public void testClientSideException() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
NNStorage mockStorage=Mockito.mock(NNStorage.class);
// Deliberately nonexistent local destination to force a client-side error.
List localPath=Collections.singletonList(new File("/xxxxx-does-not-exist/blah"));
try {
URL fsName=DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(),conf,DFSUtil.getHttpClientScheme(conf)).toURL();
String id="getimage=1&txid=0";
TransferFsImage.getFileClient(fsName,id,localPath,mockStorage,false);
fail("Didn't get an exception!");
}
catch ( IOException ioe) {
// The failing destination must have been reported back to NNStorage.
Mockito.verify(mockStorage).reportErrorOnFile(localPath.get(0));
assertTrue("Unexpected exception: " + StringUtils.stringifyException(ioe),ioe.getMessage().contains("Unable to download to any storage"));
}
finally {
cluster.shutdown();
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test to verify the timeout of Image upload: a servlet that never responds
 * must cause uploadImageFromStorage to fail with a SocketTimeoutException.
 */
@Test(timeout=10000) public void testImageUploadTimeout() throws Exception {
Configuration conf=new HdfsConfiguration();
NNStorage mockStorage=Mockito.mock(NNStorage.class);
HttpServer2 testServer=HttpServerFunctionalTest.createServer("hdfs");
try {
testServer.addServlet("ImageTransfer",ImageServlet.PATH_SPEC,TestImageTransferServlet.class);
testServer.start();
URL serverURL=HttpServerFunctionalTest.getServerURL(testServer);
// 2s socket timeout, well under the 10s test timeout.
TransferFsImage.timeout=2000;
File tmpDir=new File(new FileSystemTestHelper().getTestRootDir());
tmpDir.mkdirs();
File mockImageFile=File.createTempFile("image","",tmpDir);
// FIX: try-with-resources so the stream is closed even if write() throws;
// the original leaked the FileOutputStream on failure. Content is ASCII,
// so the platform-default charset of getBytes() is harmless here.
try (FileOutputStream imageFile=new FileOutputStream(mockImageFile)) {
imageFile.write("data".getBytes());
}
Mockito.when(mockStorage.findImageFile(Mockito.any(NameNodeFile.class),Mockito.anyLong())).thenReturn(mockImageFile);
Mockito.when(mockStorage.toColonSeparatedString()).thenReturn("storage:info:string");
try {
TransferFsImage.uploadImageFromStorage(serverURL,conf,mockStorage,NameNodeFile.IMAGE,1L);
fail("TransferImage Should fail with timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("Upload should timeout","Read timed out",e.getMessage());
}
}
finally {
testServer.stop();
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test to verify the read timeout: a servlet that never responds must cause
 * getFileClient to fail with a SocketTimeoutException within the limit.
 */
@Test(timeout=5000) public void testGetImageTimeout() throws Exception {
HttpServer2 testServer=HttpServerFunctionalTest.createServer("hdfs");
try {
testServer.addServlet("ImageTransfer",ImageServlet.PATH_SPEC,TestImageTransferServlet.class);
testServer.start();
URL serverURL=HttpServerFunctionalTest.getServerURL(testServer);
// 2s socket timeout, well under the 5s test timeout.
TransferFsImage.timeout=2000;
try {
TransferFsImage.getFileClient(serverURL,"txid=1",null,null,false);
fail("TransferImage Should fail with timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("Read should timeout","Read timed out",e.getMessage());
}
}
finally {
if (testServer != null) {
testServer.stop();
}
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test for the case where the shared edits dir doesn't have
 * all of the recent edit logs.
 */
@Test public void testSharedEditsMissingLogs() throws Exception {
removeStandbyNameDirs();
// Roll the edit log so segment [1,2] is finalized; the new segment starts
// at txid 3.
CheckpointSignature sig=nn0.getRpcServer().rollEditLog();
assertEquals(3,sig.getCurSegmentTxId());
// Delete the finalized segment from the shared dir to simulate lost logs.
URI editsUri=cluster.getSharedEditsDir(0,1);
File editsDir=new File(editsUri);
File editsSegment=new File(new File(editsDir,"current"),NNStorage.getFinalizedEditsFileName(1,2));
GenericTestUtils.assertExists(editsSegment);
assertTrue(editsSegment.delete());
// BootstrapStandby must fail with ERR_CODE_LOGS_UNAVAILABLE and log a
// FATAL message about the missing transaction range.
LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(BootstrapStandby.class));
try {
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE,rc);
}
finally {
logs.stopCapturing();
}
GenericTestUtils.assertMatches(logs.getOutput(),"FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test for the base success case. The primary NN
 * hasn't made any checkpoints, and we copy the fsimage_0
 * file over and start up.
 */
@Test public void testSuccessfulBaseCase() throws Exception {
removeStandbyNameDirs();
// Sanity check: with its name dirs removed, the standby cannot restart.
try {
cluster.restartNameNode(1);
fail("Did not throw");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("storage directory does not exist or is not accessible",ioe);
}
// BootstrapStandby should copy fsimage_0 over and exit with code 0.
int rc=BootstrapStandby.run(new String[]{"-nonInteractive"},cluster.getConfiguration(1));
assertEquals(0,rc);
FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of(0));
FSImageTestUtil.assertNNFilesMatch(cluster);
// With the bootstrapped image in place the standby restarts cleanly.
cluster.restartNameNode(1);
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Make sure that starting a second NN with the -upgrade flag fails if the
 * other NN has already done that.
 */
@Test public void testCannotUpgradeSecondNameNode() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Baseline: no "previous" dirs exist before any upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 (NN1 down): NN0 and the shared dir gain "previous".
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Restart NN0 normally; the namespace keeps working post-upgrade.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// NN1 attempting -upgrade must be refused: the shared log was already
// upgraded by NN0.
cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE);
try {
cluster.restartNameNode(1,false);
fail("Should not have been able to start second NN with -upgrade");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("It looks like the shared log is already being upgraded",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Ensure that an admin cannot finalize an HA upgrade without at least one NN
 * being active.
 */
@Test public void testCannotFinalizeIfNoActive() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Baseline: no "previous" dirs exist before any upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 (NN1 down): NN0 and the shared dir gain "previous".
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bring NN1 back via bootstrapStandby and fail over to it.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
// Put BOTH NNs in standby; finalize must then be refused.
cluster.transitionToStandby(1);
try {
runFinalizeCommand(cluster);
fail("Should not have been able to finalize upgrade with no NN active");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot finalize with no NameNode active",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Make sure that an HA NN with NFS-based HA can successfully start and
 * upgrade.
 */
@Test public void testNfsUpgrade() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Baseline: no "previous" dirs exist before any upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 (NN1 down): NN0 and the shared dir gain "previous".
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Restart NN0 normally and keep writing.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bootstrap NN1 from the upgraded state and fail over; writes still work.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Make sure that even if the NN which initiated the upgrade is in the standby
 * state that we're allowed to finalize.
 */
@Test public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Baseline: no "previous" dirs on JNs or NNs before the upgrade.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 (NN1 down): NN0 and the JournalNodes gain "previous".
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
// Bootstrap NN1 and fail over so the UPGRADING NN (NN0) is now standby.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// Finalizing from NN1 must succeed and clear all "previous" dirs.
runFinalizeCommand(cluster);
checkClusterPreviousDirExistence(cluster,false);
checkJnPreviousDirExistence(qjCluster,false);
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that marking the shared edits dir as being "required" causes the NN to
 * fail if that dir can't be accessed.
 */
@Test public void testFailureOfSharedDir() throws Exception {
Configuration conf=new Configuration();
// Short resource-check interval so the NN notices the dir loss quickly.
conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,2000);
MiniDFSCluster cluster=null;
File sharedEditsDir=null;
try {
// checkExitOnShutdown(false) because we expect the NN to call exit
// (caught below as ExitException) when the required journal fails.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build();
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/test1")));
// Make the shared edits dir unwritable to simulate its failure.
URI sharedEditsUri=cluster.getSharedEditsDir(0,1);
sharedEditsDir=new File(sharedEditsUri);
assertEquals(0,FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"-w",true));
// Wait two resource-check intervals so both NNs observe the failure.
Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
NameNode nn1=cluster.getNameNode(1);
assertTrue(nn1.isStandbyState());
assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability",nn1.isInSafeMode());
// Rolling the edit log requires finalizing the segment in the required
// shared journal, which must now abort the NN.
NameNode nn0=cluster.getNameNode(0);
try {
nn0.getRpcServer().rollEditLog();
fail("Succeeded in rolling edit log despite shared dir being deleted");
}
catch ( ExitException ee) {
GenericTestUtils.assertExceptionContains("finalize log segment 1, 3 failed for required journal",ee);
}
// The local (non-shared) edits dirs should still hold only the
// in-progress segment starting at txid 1.
for ( URI editsUri : cluster.getNameEditsDirs(0)) {
if (editsUri.equals(sharedEditsUri)) {
continue;
}
File editsDir=new File(editsUri.getPath());
File curDir=new File(editsDir,"current");
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getInProgressEditsFileName(1));
}
}
finally {
// Restore write permission so the test dir can be cleaned up.
if (sharedEditsDir != null) {
FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"+w",true);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Configuring more than one shared edits directory is invalid and must be
 * rejected by FSNamesystem.getNamespaceEditsDirs with an IOException.
 */
@Test public void testMultipleSharedDirsFails() throws Exception {
Configuration cfg=new Configuration();
URI firstShared=new URI("file:///shared-A");
URI secondShared=new URI("file:///shared-B");
URI localEdits=new URI("file:///local-A");
// Two shared dirs, comma-separated, plus one ordinary local edits dir.
cfg.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,firstShared + "," + secondShared);
cfg.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,localEdits.toString());
try {
FSNamesystem.getNamespaceEditsDirs(cfg);
fail("Allowed multiple shared edits directories");
}
catch ( IOException ioe) {
assertEquals("Multiple shared edits directories are not yet supported",ioe.getMessage());
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Ensure that the standby fails to become active if it cannot read all
 * available edits in the shared edits dir when it is transitioning to active
 * state.
 */
@Test public void testFailureToReadEditsOnTransitionToActive() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3));
// Inject an edit-log read failure, then write edits the standby can't read.
causeFailureOnEditLogRead();
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
}
// With NN0 down, NN1's transition to active must abort (ExitException)
// because it cannot replay the remaining edits.
cluster.shutdownNameNode(0);
try {
cluster.transitionToActive(1);
fail("Standby transitioned to active, but should not have been able to");
}
catch ( ExitException ee) {
GenericTestUtils.assertExceptionContains("Error replaying edit log",ee);
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * Test that the standby NN won't double-replay earlier edits if it encounters
 * a failure to read a later edit.
 */
@Test public void testFailuretoReadEdits() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
// Edits after the catch-up point: chown, delete DIR1, create DIR2 and DIR3.
fs.setOwner(new Path(TEST_DIR1),"foo","bar");
assertTrue(fs.delete(new Path(TEST_DIR1),true));
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
// Inject a read failure partway through the edit stream.
LimitedEditLogAnswer answer=causeFailureOnEditLogRead();
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
}
// Partial replay: DIR1 deleted, DIR2 applied, DIR3 not yet applied.
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR1,false));
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR2,false).isDir());
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR3,false));
// Clear the injected failure; the standby must finish replaying WITHOUT
// double-applying the earlier edits.
answer.setThrowExceptionOnRead(false);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR1,false));
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR2,false).isDir());
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR3,false).isDir());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Test the following case:
 * 1. SBN is reading a finalized edits file when NFS disappears halfway
 * through (or some intermittent error happens)
 * 2. SBN performs a checkpoint and uploads it to the NN
 * 3. NN receives a checkpoint that doesn't correspond to the end of any log
 * segment
 * 4. Both NN and SBN should be able to restart at this point.
 * This is a regression test for HDFS-2766.
 */
@Test public void testCheckpointStartingMidEditsFile() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3));
// Fail the edit-log read mid-segment, then generate more edits.
causeFailureOnEditLogRead();
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
}
// Checkpoint at txid 5 lands mid-segment on both NNs.
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3,5));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5));
// NN0 must restart cleanly even though the checkpoint doesn't align with
// the end of any log segment, and must retain all checkpoints.
cluster.restartNameNode(0);
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5));
// All three directories must be visible through the restarted NN0.
FileSystem fs0=null;
try {
fs0=FileSystem.get(NameNode.getUri(nn0.getNameNodeAddress()),conf);
assertTrue(fs0.exists(new Path(TEST_DIR1)));
assertTrue(fs0.exists(new Path(TEST_DIR2)));
assertTrue(fs0.exists(new Path(TEST_DIR3)));
}
finally {
if (fs0 != null) fs0.close();
}
}
BranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * DFS#isInSafeMode should check the ActiveNNs safemode in HA enabled cluster. HDFS-3507
 * @throws Exception
 */
@Test public void testIsInSafemode() throws Exception {
// Query the standby NN directly: it must refuse isInSafeMode.
NameNode nn2=cluster.getNameNode(1);
assertTrue("nn2 should be in standby state",nn2.isStandbyState());
InetSocketAddress nameNodeAddress=nn2.getNameNodeAddress();
Configuration conf=new Configuration();
DistributedFileSystem dfs=new DistributedFileSystem();
try {
String standbyUri="hdfs://" + nameNodeAddress.getHostName() + ":"+ nameNodeAddress.getPort();
dfs.initialize(URI.create(standbyUri),conf);
dfs.isInSafeMode();
fail("StandBy should throw exception for isInSafeMode");
}
catch ( RemoteException re) {
// The server-side StandbyException arrives wrapped in a RemoteException.
IOException unwrapped=re.unwrapRemoteException();
assertTrue("StandBy nn should not support isInSafeMode",unwrapped instanceof StandbyException);
}
finally {
if (dfs != null) {
dfs.close();
}
}
// Fail over to NN1 and verify the failover client reads the ACTIVE NN's
// safemode state in both directions.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
DistributedFileSystem dfsWithFailOver=(DistributedFileSystem)fs;
assertTrue("ANN should be in SafeMode",dfsWithFailOver.isInSafeMode());
cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
assertFalse("ANN should be out of SafeMode",dfsWithFailOver.isInSafeMode());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Make sure that when we transition to active in safe mode that we don't
 * prematurely consider blocks missing just because not all DNs have reported
 * yet.
 * This is a regression test for HDFS-3921.
 */
@Test public void testNoPopulatingReplQueuesWhenStartingActiveInSafeMode() throws IOException {
DFSTestUtil.createFile(fs,new Path("/test"),15 * BLOCK_SIZE,(short)3,1L);
// Stop one DN and restart NN0 so it comes up before all DNs have reported.
cluster.stopDataNode(1);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(cluster.getNameNode(0).isInSafeMode());
// While still in safe mode, no blocks may be flagged as missing.
assertEquals(0,cluster.getNamesystem(0).getMissingBlocksCount());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test for HDFS-2812. Since lease renewals go from the client
* only to the active NN, the SBN will have out-of-date lease
* info when it becomes active. We need to make sure we don't
* accidentally mark the leases as expired when the failover
* proceeds.
*/
@Test(timeout=120000) public void testLeasesRenewedOnTransition() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
FSDataOutputStream stm=null;
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
NameNode nn0=cluster.getNameNode(0);
NameNode nn1=cluster.getNameNode(1);
try {
cluster.waitActive();
cluster.transitionToActive(0);
LOG.info("Starting with NN 0 active");
stm=fs.create(TEST_FILE_PATH);
long nn0t0=NameNodeAdapter.getLeaseRenewalTime(nn0,TEST_FILE_STR);
assertTrue(nn0t0 > 0);
long nn1t0=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertEquals("Lease should not yet exist on nn1",-1,nn1t0);
Thread.sleep(5);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
long nn1t1=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been created on standby. Time was: " + nn1t1,nn1t1 > nn0t0);
Thread.sleep(5);
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
long nn1t2=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been renewed by failover process",nn1t2 > nn1t1);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* This test also serves to test{@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration,String)} and{@link DFSUtil#getRpcAddressesForNameserviceId(Configuration,String,String)}by virtue of the fact that it wouldn't work properly if the proxies
* returned were not for the correct NNs.
*/
@Test public void testIsAtLeastOneActive() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(new HdfsConfiguration()).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
try {
Configuration conf=new HdfsConfiguration();
HATestUtil.setFailoverConfigurations(cluster,conf);
List namenodes=HAUtil.getProxiesForAllNameNodesInNameservice(conf,HATestUtil.getLogicalHostname(cluster));
assertEquals(2,namenodes.size());
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToActive(0);
assertTrue(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToStandby(0);
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToActive(1);
assertTrue(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToStandby(1);
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
* Test which takes a single node and flip flops between
* active and standby mode, making sure it doesn't
* double-play any edits.
*/
@Test public void testTransitionActiveToStandby() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=cluster.getFileSystem(0);
fs.mkdirs(TEST_DIR);
cluster.transitionToStandby(0);
try {
fs.mkdirs(new Path("/x"));
fail("Didn't throw trying to mutate FS in standby state");
}
catch ( Throwable t) {
GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported",t);
}
cluster.transitionToActive(0);
DFSTestUtil.createFile(fs,new Path(TEST_DIR,"foo"),10,(short)1,1L);
fs.delete(TEST_DIR,true);
cluster.transitionToStandby(0);
cluster.transitionToActive(0);
assertFalse(fs.exists(TEST_DIR));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierBranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
* Test the scenario where the NN fails over after issuing a block
* synchronization request, but before it is committed. The
* DN running the recovery should then fail to commit the synchronization
* and a later retry will succeed.
*/
@Test(timeout=30000) public void testFailoverRightBeforeCommitSynchronization() throws Exception {
final Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
FSDataOutputStream stm=null;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
Thread.sleep(500);
LOG.info("Starting with NN 0 active");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
stm=fs.create(TEST_PATH);
AppendTestUtil.write(stm,0,BLOCK_SIZE / 2);
stm.hflush();
NameNode nn0=cluster.getNameNode(0);
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,TEST_PATH);
DatanodeDescriptor expectedPrimary=DFSTestUtil.getExpectedPrimaryNode(nn0,blk);
LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
DataNode primaryDN=cluster.getDataNode(expectedPrimary.getIpcPort());
DatanodeProtocolClientSideTranslatorPB nnSpy=DataNodeTestUtils.spyOnBposToNN(primaryDN,nn0);
DelayAnswer delayer=new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(Mockito.eq(blk),Mockito.anyInt(),Mockito.anyLong(),Mockito.eq(true),Mockito.eq(false),(DatanodeID[])Mockito.anyObject(),(String[])Mockito.anyObject());
DistributedFileSystem fsOtherUser=createFsAsOtherUser(cluster,conf);
assertFalse(fsOtherUser.recoverLease(TEST_PATH));
LOG.info("Waiting for commitBlockSynchronization call from primary");
delayer.waitForCall();
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
delayer.proceed();
delayer.waitForResult();
Throwable t=delayer.getThrown();
if (t == null) {
fail("commitBlockSynchronization call did not fail on standby");
}
GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported",t);
loopRecoverLease(fsOtherUser,TEST_PATH);
AppendTestUtil.check(fs,TEST_PATH,BLOCK_SIZE / 2);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* 1. Run a set of operations
* 2. Trigger the NN failover
* 3. Check the retry cache on the original standby NN
*/
@Test(timeout=60000) public void testRetryCacheOnStandbyNN() throws Exception {
DFSTestUtil.runOperations(cluster,dfs,conf,BlockSize,0);
FSNamesystem fsn0=cluster.getNamesystem(0);
LightWeightCache cacheSet=(LightWeightCache)fsn0.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
Map oldEntries=new HashMap();
Iterator iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
oldEntries.put(entry,entry);
}
cluster.getNameNode(0).getRpcServer().rollEditLog();
cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
FSNamesystem fsn1=cluster.getNamesystem(1);
cacheSet=(LightWeightCache)fsn1.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
* Make sure that clients will receive StandbyExceptions even when a
* checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer
* thread will have FSNS lock. Regression test for HDFS-4591.
*/
@Test(timeout=300000) public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
FSImage spyImage1=NameNodeAdapter.spyOnFsImage(nn1);
DelayAnswer answerer=new DelayAnswer(LOG);
Mockito.doAnswer(answerer).when(spyImage1).saveNamespace(Mockito.any(FSNamesystem.class),Mockito.eq(NameNodeFile.IMAGE),Mockito.any(Canceler.class));
doEdits(0,1000);
nn0.getRpcServer().rollEditLog();
answerer.waitForCall();
assertTrue("SBN is not performing checkpoint but it should be.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
try {
nn1.getRpcServer().getFileInfo("/");
fail("Should have thrown StandbyException, but instead succeeded.");
}
catch ( StandbyException se) {
GenericTestUtils.assertExceptionContains("is not supported",se);
}
assertTrue("SBN should have still been checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
answerer.proceed();
answerer.waitForResult();
assertTrue("SBN should have finished checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
}
APIUtilityVerifierIterativeVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
* Test the snapshot limit of a single snapshottable directory.
* @throws Exception
*/
@Test(timeout=300000) public void testSnapshotLimit() throws Exception {
final int step=1000;
final String dirStr="/testSnapshotLimit/dir";
final Path dir=new Path(dirStr);
hdfs.mkdirs(dir,new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
int s=0;
for (; s < SNAPSHOT_LIMIT; s++) {
final String snapshotName="s" + s;
hdfs.createSnapshot(dir,snapshotName);
if (s % step == 0) {
final Path file=new Path(dirStr,"f" + s);
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,SEED);
}
}
try {
hdfs.createSnapshot(dir,"s" + s);
Assert.fail("Expected to fail to create snapshot, but didn't.");
}
catch ( IOException ioe) {
SnapshotTestHelper.LOG.info("The exception is expected.",ioe);
}
for (int f=0; f < SNAPSHOT_LIMIT; f+=step) {
final String file="f" + f;
s=RANDOM.nextInt(step);
for (; s < SNAPSHOT_LIMIT; s+=RANDOM.nextInt(step)) {
final Path p=SnapshotTestHelper.getSnapshotPath(dir,"s" + s,file);
Assert.assertEquals(s > f,hdfs.exists(p));
}
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
@Test(timeout=300000) public void testSnapshotWithQuota() throws Exception {
final String dirStr="/testSnapshotWithQuota/dir";
final Path dir=new Path(dirStr);
hdfs.mkdirs(dir,new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
final int NS_QUOTA=6;
hdfs.setQuota(dir,NS_QUOTA,HdfsConstants.QUOTA_DONT_SET);
final Path foo=new Path(dir,"foo");
final Path f1=new Path(foo,"f1");
DFSTestUtil.createFile(hdfs,f1,BLOCKSIZE,REPLICATION,SEED);
{
final Path snapshotPath=hdfs.createSnapshot(dir);
final String snapshotName=snapshotPath.getName();
Assert.assertTrue("snapshotName=" + snapshotName,Pattern.matches("s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",snapshotName));
final Path parent=snapshotPath.getParent();
Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR,parent.getName());
Assert.assertEquals(dir,parent.getParent());
}
final Path f2=new Path(foo,"f2");
DFSTestUtil.createFile(hdfs,f2,BLOCKSIZE,REPLICATION,SEED);
try {
final Path f3=new Path(foo,"f3");
DFSTestUtil.createFile(hdfs,f3,BLOCKSIZE,REPLICATION,SEED);
Assert.fail();
}
catch ( NSQuotaExceededException e) {
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
try {
hdfs.createSnapshot(dir);
Assert.fail();
}
catch ( NSQuotaExceededException e) {
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
try {
hdfs.setPermission(f1,new FsPermission((short)0));
Assert.fail();
}
catch ( RemoteException e) {
Assert.assertSame(NSQuotaExceededException.class,e.unwrapRemoteException().getClass());
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
hdfs.setPermission(f2,new FsPermission((short)0));
hdfs.setQuota(dir,NS_QUOTA + 2,HdfsConstants.QUOTA_DONT_SET);
hdfs.createSnapshot(dir,"s1");
hdfs.setPermission(foo,new FsPermission((short)0444));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test {@link Snapshot#ID_COMPARATOR}.
*/
@Test(timeout=300000) public void testIdCmp(){
final PermissionStatus perm=PermissionStatus.createImmutable("user","group",FsPermission.createImmutable((short)0));
final INodeDirectory snapshottable=new INodeDirectory(0,DFSUtil.string2Bytes("foo"),perm,0L);
snapshottable.addSnapshottableFeature();
final Snapshot[] snapshots={new Snapshot(1,"s1",snapshottable),new Snapshot(1,"s1",snapshottable),new Snapshot(2,"s2",snapshottable),new Snapshot(2,"s2",snapshottable)};
Assert.assertEquals(0,Snapshot.ID_COMPARATOR.compare(null,null));
for ( Snapshot s : snapshots) {
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null,s) > 0);
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s,null) < 0);
for ( Snapshot t : snapshots) {
final int expected=s.getRoot().getLocalName().compareTo(t.getRoot().getLocalName());
final int computed=Snapshot.ID_COMPARATOR.compare(s,t);
Assert.assertEquals(expected > 0,computed > 0);
Assert.assertEquals(expected == 0,computed == 0);
Assert.assertEquals(expected < 0,computed < 0);
}
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
@Test(timeout=300000) public void testRenameFromSDir2NonSDir() throws Exception {
final String dirStr="/testRenameWithSnapshot";
final String abcStr=dirStr + "/abc";
final Path abc=new Path(abcStr);
hdfs.mkdirs(abc,new FsPermission((short)0777));
hdfs.allowSnapshot(abc);
final Path foo=new Path(abc,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(abc,"s0");
try {
hdfs.rename(abc,new Path(dirStr,"tmp"));
fail("Expect exception since " + abc + " is snapshottable and already has snapshots");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains(abcStr + " is snapshottable and already has snapshots",e);
}
final String xyzStr=dirStr + "/xyz";
final Path xyz=new Path(xyzStr);
hdfs.mkdirs(xyz,new FsPermission((short)0777));
final Path bar=new Path(xyz,"bar");
hdfs.rename(foo,bar);
final INode fooRef=fsdir.getINode(SnapshotTestHelper.getSnapshotPath(abc,"s0","foo").toString());
Assert.assertTrue(fooRef.isReference());
Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
final INodeReference.WithCount withCount=(INodeReference.WithCount)fooRef.asReference().getReferredINode();
Assert.assertEquals(2,withCount.getReferenceCount());
final INode barRef=fsdir.getINode(bar.toString());
Assert.assertTrue(barRef.isReference());
Assert.assertSame(withCount,barRef.asReference().getReferredINode());
hdfs.delete(bar,false);
Assert.assertEquals(1,withCount.getReferenceCount());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Make sure we clean the whole subtree under a DstReference node after
* deleting a snapshot.
* see HDFS-5476.
*/
@Test public void testCleanDstReference() throws Exception {
final Path test=new Path("/test");
final Path foo=new Path(test,"foo");
final Path bar=new Path(foo,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
final Path fileInBar=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,fileInBar,BLOCKSIZE,REPL,SEED);
final Path foo2=new Path(test,"foo2");
hdfs.rename(foo,foo2);
hdfs.createSnapshot(test,"s1");
hdfs.delete(new Path(foo2,"bar"),true);
hdfs.delete(foo2,true);
final Path sfileInBar=SnapshotTestHelper.getSnapshotPath(test,"s1","foo2/bar/file");
assertTrue(hdfs.exists(sfileInBar));
hdfs.deleteSnapshot(test,"s1");
assertFalse(hdfs.exists(sfileInBar));
restartClusterAndCheckImage(true);
final Path barInS0=SnapshotTestHelper.getSnapshotPath(test,"s0","foo/bar");
INodeDirectory barNode=fsdir.getINode(barInS0.toString()).asDirectory();
assertEquals(0,barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
List diffList=barNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertEquals(0,diff.getChildrenDiff().getList(ListType.DELETED).size());
assertEquals(0,diff.getChildrenDiff().getList(ListType.CREATED).size());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test the undo section of rename. Before the rename, we create the renamed
* file/dir before taking the snapshot.
*/
@Test public void testRenameUndo_1() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
INode fooNode_s1=fsdir.getINode(foo_s1.toString());
assertTrue(fooNode_s1 == fooNode);
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
* Unit test for HDFS-4842.
*/
@Test public void testRenameDirAndDeleteSnapshot_7() throws Exception {
fsn.getSnapshotManager().setAllowNestedSnapshots(true);
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo=new Path(dir2,"foo");
final Path bar=new Path(foo,"bar");
final Path file=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
SnapshotTestHelper.createSnapshot(hdfs,test,"s1");
hdfs.delete(file,true);
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
final Path newfoo=new Path(dir1,foo.getName());
hdfs.rename(foo,newfoo);
hdfs.deleteSnapshot(test,"s1");
final Path file_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2","foo/bar/file");
assertFalse(hdfs.exists(file_s2));
final Path file_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo/bar/file");
assertTrue(hdfs.exists(file_s0));
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List dir1DiffList=dir1Node.getDiffs().asList();
assertEquals(1,dir1DiffList.size());
List dList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertTrue(dList.isEmpty());
List cList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,cList.size());
INode cNode=cList.get(0);
INode fooNode=fsdir.getINode4Write(newfoo.toString());
assertSame(cNode,fooNode);
final Path newbar=new Path(newfoo,bar.getName());
INodeDirectory barNode=fsdir.getINode4Write(newbar.toString()).asDirectory();
assertSame(fooNode.asDirectory(),barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
DirectoryDiff diff=barDiffList.get(0);
INodeDirectory testNode=fsdir.getINode4Write(test.toString()).asDirectory();
Snapshot s0=testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertEquals(s0.getId(),diff.getSnapshotId());
assertEquals("file",diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName());
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
List dir2DiffList=dir2Node.getDiffs().asList();
assertEquals(1,dir2DiffList.size());
dList=dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertEquals(1,dList.size());
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2",foo.getName());
INodeReference.WithName fooNode_s2=(INodeReference.WithName)fsdir.getINode(foo_s2.toString());
assertSame(dList.get(0),fooNode_s2);
assertSame(fooNode.asReference().getReferredINode(),fooNode_s2.getReferredINode());
restartClusterAndCheckImage(true);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test the undo section of rename. Before the rename, we create the renamed
* file/dir after taking the snapshot.
*/
@Test public void testRenameUndo_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectory);
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
assertFalse(hdfs.exists(foo_s1));
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test rename a dir and a file multiple times across snapshottable
* directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
* Only create snapshots in the beginning (before the rename).
*/
@Test public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar2_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2_dir1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar2_dir2=new Path(sdir2,"bar");
hdfs.rename(bar2_dir1,bar2_dir2);
restartClusterAndCheckImage(true);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar2_dir2,REPL_1);
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar2_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar1_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar1");
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL_1,statusBar1.getReplication());
FileStatus statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL_1,statusBar2.getReplication());
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar2_dir3=new Path(sdir3,"bar");
hdfs.rename(bar2_dir2,bar2_dir3);
restartClusterAndCheckImage(true);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar2_dir3,REPL_2);
final Path bar1_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","foo/bar1");
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir3);
assertEquals(REPL_2,statusBar2.getReplication());
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar2_dir3,bar2_dir2);
restartClusterAndCheckImage(true);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar2_dir2,REPL);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL,statusBar2.getReplication());
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar2_dir2,bar2_dir1);
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(2,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
assertEquals(1,foo.getDiffs().asList().size());
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),foo.getDirectoryWithSnapshotFeature().getLastSnapshotId());
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1,bar1.getDiffs().asList().size());
assertEquals(s1.getId(),bar1.getDiffs().getLastSnapshotId());
INodeReference barRef=fsdir.getINode4Write(bar2_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(2,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
assertEquals(1,bar.getDiffs().asList().size());
assertEquals(s1.getId(),bar.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
hdfs.delete(foo_dir1,true);
hdfs.delete(bar2_dir1,true);
restartClusterAndCheckImage(true);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
assertFalse(hdfs.exists(foo_dir1));
assertFalse(hdfs.exists(bar1_dir1));
assertFalse(hdfs.exists(bar2_dir1));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
fooRef=fsdir.getINode(foo_s1.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWithCount.getReferenceCount());
barRef=fsdir.getINode(bar2_s1.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(1,barWithCount.getReferenceCount());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the undo section of the second-time rename.
 *
 * /dir3 is replaced in the namespace tree by a Mockito spy whose
 * addChild always returns false, so every rename into /dir3 fails and
 * the rename operation must undo its partial changes. The state of the
 * destination tree /dir2 (children, directory diffs, and the
 * DstReference node for "foo") is verified after the first failed
 * rename, and again after snapshot s3 is taken and the rename retried.
 */
@Test public void testRenameUndo_3() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Spy on /dir3 so every addChild fails, forcing the rename-undo path.
INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3=spy(dir3);
doReturn(false).when(mockDir3).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
// Install the failing spy in place of the real /dir3 inode.
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
final Path foo_dir2=new Path(sdir2,"foo2");
final Path foo_dir3=new Path(sdir3,"foo3");
// The rename into /dir2 succeeds; the one into the spied /dir3 must fail.
hdfs.rename(foo,foo_dir2);
boolean result=hdfs.rename(foo_dir2,foo_dir3);
assertFalse(result);
// After the failed rename, foo2 must still be the only child of /dir2,
// recorded in the CREATED list of the s2 diff but absent from s2 itself.
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s2=dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
List dir2Diffs=dir2Node.getDiffs().asList();
assertEquals(1,dir2Diffs.size());
assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir2Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo2");
assertFalse(hdfs.exists(foo_s2));
// foo must still be a DstReference whose single diff belongs to s1.
INode fooNode=fsdir.getINode4Write(foo_dir2.toString());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
assertTrue(fooNode instanceof INodeReference.DstReference);
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
// Take s3 on the destination tree, then retry the failing rename.
hdfs.createSnapshot(sdir2,"s3");
result=hdfs.rename(foo_dir2,foo_dir3);
assertFalse(result);
// /dir2 now carries diffs for s2 and s3; foo2 stays in the CREATED list
// of the s2 diff, and the s3 diff must record no children changes.
dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s3=dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
fooNode=fsdir.getINode4Write(foo_dir2.toString());
dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
dir2Diffs=dir2Node.getDiffs().asList();
assertEquals(2,dir2Diffs.size());
assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
assertEquals(s3.getId(),dir2Diffs.get(1).getSnapshotId());
childrenDiff=dir2Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
childrenDiff=dir2Diffs.get(1).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
// foo2 existed before s3, so it is visible in s3 but still not in s2,
// and its diff list now ends at s3.
final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo2");
assertFalse(hdfs.exists(foo_s2));
assertTrue(hdfs.exists(foo_s3));
assertTrue(fooNode instanceof INodeReference.DstReference);
fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(2,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
assertEquals(s3.getId(),fooDiffs.get(1).getSnapshotId());
}
UtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test undo where dst node being overwritten is a reference node.
 *
 * /dir2/foo2 is renamed to /dir3/foo3 first, making foo3 an
 * INodeReference. The spied /dir3 then rejects the next addChild call
 * with a non-null inode, so the OVERWRITE rename of /dir1/foo onto foo3
 * fails and the undo must restore the original reference node, its
 * reference count, and its parent reference.
 */
@Test public void testRenameUndo_4() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path foo2=new Path(sdir2,"foo2");
hdfs.mkdirs(foo2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
final Path foo3=new Path(sdir3,"foo3");
hdfs.rename(foo2,foo3);
// Renaming across snapshottable dirs turns foo3 into a reference node.
INode foo3Node=fsdir.getINode4Write(foo3.toString());
assertTrue(foo3Node.isReference());
// Spy on /dir3: the first addChild with a real inode returns false
// (forcing the undo); subsequent calls fall through to the real method.
INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3=spy(dir3);
doReturn(false).when(mockDir3).addChild((INode)Mockito.isNull(),anyBoolean(),Mockito.anyInt());
Mockito.when(mockDir3.addChild((INode)Mockito.isNotNull(),anyBoolean(),Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
// Re-point the existing reference at the spy so the undo works on it.
foo3Node.setParent(mockDir3);
try {
hdfs.rename(foo,foo3,Rename.OVERWRITE);
fail("the rename from " + foo + " to "+ foo3+ " should fail");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("rename from " + foo + " to "+ foo3+ " failed.",e);
}
// The undo must leave the very same reference node in place, with both
// references intact and foo3Node restored as the parent reference.
final INode foo3Node_undo=fsdir.getINode4Write(foo3.toString());
assertSame(foo3Node,foo3Node_undo);
INodeReference.WithCount foo3_wc=(WithCount)foo3Node.asReference().getReferredINode();
assertEquals(2,foo3_wc.getReferenceCount());
assertSame(foo3Node,foo3_wc.getParentReference());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Move foo into /dir2, grow it there, snapshot, move it back, then drop s3.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.rename(foo2,foo);
hdfs.deleteSnapshot(sdir2,"s3");
// Expected namespace usage of each tree after the round trip;
// exact counts follow HDFS's snapshot-copy accounting.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(9,q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// foo's s1 copy is a WithName reference; the same WithCount is shared by
// the snapshot copy and the current DstReference (count == 2).
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
final INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(2,wc.getReferenceCount());
// Current children: bar (original) plus bar2/bar3 created while in /dir2.
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
assertEquals(bar2.getName(),children.get(1).getLocalName());
assertEquals(bar3.getName(),children.get(2).getLocalName());
// Deleting s3 must leave only the s1 diff, which records bar2 and bar3
// as CREATED since s1.
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(2,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
// The current foo is the DstReference sharing the same WithCount node.
final INode fooRef2=fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2=(WithCount)fooRef2.asReference().getReferredINode();
assertSame(wc,wc2);
assertSame(fooRef2,wc.getParentReference());
restartClusterAndCheckImage(true);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test rename a dir multiple times across snapshottable directories:
 * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Create snapshots after each rename.
 *
 * After each hop the replication of foo/bar1 and bar is changed, so each
 * snapshot copy must later report the replication that was in effect
 * when that snapshot was taken. The final checks verify the reference
 * counts and diff lists of the shared WithCount nodes, and that deleting
 * the current copies leaves all snapshot copies intact.
 */
@Test public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar_dir1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
// Round 1: /dir1 -> /dir2, then lower replication to REPL_1.
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar_dir2=new Path(sdir2,"bar");
hdfs.rename(bar_dir1,bar_dir2);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar_dir2,REPL_1);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s11");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s22");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s33");
// Round 2: /dir2 -> /dir3, then set replication to REPL_2.
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar_dir3=new Path(sdir3,"bar");
hdfs.rename(bar_dir2,bar_dir3);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar_dir3,REPL_2);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s222");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s333");
// Each snapshot copy must exist and show the replication that was in
// effect when the snapshot was taken (s1: REPL, s22: REPL_1, s333: REPL_2).
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar1_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","foo/bar1");
final Path bar1_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","foo/bar1");
final Path bar_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","bar");
final Path bar_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
FileStatus statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir3);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
// Round 3: /dir3 -> /dir2, back to the original replication REPL.
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar_dir3,bar_dir2);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar_dir2,REPL);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2222");
// All earlier snapshot copies must still be visible with their
// respective replication values; s2222 records the restored REPL.
final Path bar1_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo/bar1");
final Path bar_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s2222);
assertEquals(REPL,statusBar1.getReplication());
statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir2);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s2222);
assertEquals(REPL,statusBar.getReplication());
// Round 4: /dir2 -> /dir1, completing the round trip.
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar_dir2,bar_dir1);
// With four snapshot references plus the current location, the shared
// WithCount nodes for foo and bar are referenced five times, and their
// diff lists cover s1, s22, s333, and s2222 in order.
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
INodeDirectory sdir3Node=fsdir.getINode(sdir3.toString()).asDirectory();
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(5,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
List fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
Snapshot s2222=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
Snapshot s333=sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
Snapshot s22=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),fooDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),fooDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
List bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
assertEquals(s22.getId(),bar1Diffs.get(1).getSnapshotId());
assertEquals(s1.getId(),bar1Diffs.get(0).getSnapshotId());
INodeReference barRef=fsdir.getINode4Write(bar_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(5,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
List barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),barDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),barDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),barDiffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Delete the current copies: the snapshot copies must survive and the
// reference counts drop from 5 to 4.
hdfs.delete(foo_dir1,true);
hdfs.delete(bar_dir1,true);
restartClusterAndCheckImage(true);
final Path bar1_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","foo/bar1");
final Path bar_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertFalse(hdfs.exists(bar1_s1111));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
assertFalse(hdfs.exists(bar_s1111));
// The snapshot references still share the WithCount nodes; the diff
// lists are unchanged by deleting the current copies.
final Path foo_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo");
fooRef=fsdir.getINode(foo_s2222.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(4,fooWithCount.getReferenceCount());
foo=fooWithCount.asDirectory();
fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
barRef=fsdir.getINode(bar_s2222.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(4,barWithCount.getReferenceCount());
bar=barWithCount.asFile();
barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 *
 * /test/dir2 gets a namespace quota of 5, so moving the 2-inode subtree
 * foo/bar into /test/dir2/subdir2 must fail (rename returns false). The
 * test then verifies the undo: the source tree is intact, parent
 * pointers are restored, and neither tree's snapshot diff recorded the
 * aborted change.
 */
@Test public void testRenameUndo_5() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path subdir2=new Path(dir2,"subdir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subdir2);
final Path foo=new Path(dir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Namespace quota too small for the incoming subtree.
hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);
final Path foo2=new Path(subdir2,foo.getName());
boolean rename=hdfs.rename(foo,foo2);
assertFalse(rename);
assertTrue(hdfs.exists(foo));
assertTrue(hdfs.exists(bar));
// Source side after undo: foo is still dir1's only child and bar's
// parent; bar is a plain INodeFile (no reference node left behind).
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode fooNode=childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
INode barNode=fsdir.getINode4Write(bar.toString());
assertTrue(barNode.getClass() == INodeFile.class);
assertSame(fooNode,barNode.getParent());
// dir1's s1 diff must record no children changes from the failed rename.
List diffList=dir1Node.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// Destination side after undo: quota usage and structure are unchanged.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(3,counts.get(Quota.NAMESPACE));
assertEquals(0,counts.get(Quota.DISKSPACE));
childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode subdir2Node=childrenList.get(0);
assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(subdir2.toString()));
// dir2's s2 diff must likewise be empty.
diffList=dir2Node.getDiffs().asList();
assertEquals(1,diffList.size());
diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Move foo into /dir2, create bar2/bar3 there, snapshot s3, then delete
// the dir and the snapshot -- bar2/bar3 should be fully destroyed.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.delete(foo2,true);
hdfs.deleteSnapshot(sdir2,"s3");
// Expected namespace usage after the cleanup; counts follow HDFS's
// snapshot-copy accounting.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// Only the s1 reference to foo remains (reference count == 1), and foo's
// surviving child list contains just the original bar.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(1,wc.getReferenceCount());
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
// The single remaining diff is for s1 and records no children changes --
// everything created after the rename has been destroyed.
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(0,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the rename undo when quota of dst tree is exceeded after rename.
 *
 * A namespace quota of 5 is set on /test/dir2, then /test/dir1/foo is
 * renamed with OVERWRITE onto the existing file under dir2; the
 * resulting quota usage of dir2 is verified afterwards.
 */
@Test public void testRenameExceedQuota() throws Exception {
  final Path testRoot = new Path("/test");
  final Path srcDir = new Path(testRoot, "dir1");
  final Path dstDir = new Path(testRoot, "dir2");
  final Path dstSub = new Path(dstDir, "subdir");
  final Path overwritten = new Path(dstSub, "subfile");
  hdfs.mkdirs(srcDir);
  DFSTestUtil.createFile(hdfs, overwritten, BLOCKSIZE, REPL, SEED);
  final Path srcFile = new Path(srcDir, "foo");
  DFSTestUtil.createFile(hdfs, srcFile, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, srcDir, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dstDir, "s2");
  // Tight namespace quota on the destination tree before the rename.
  hdfs.setQuota(dstDir, 5, Long.MAX_VALUE - 1);
  hdfs.rename(srcFile, overwritten, Rename.OVERWRITE);
  // Check the destination tree's quota usage after the overwrite.
  INode dstDirNode = fsdir.getINode4Write(dstDir.toString());
  assertTrue(dstDirNode.asDirectory().isSnapshottable());
  Quota.Counts usage = dstDirNode.computeQuotaUsage();
  assertEquals(7, usage.get(Quota.NAMESPACE));
  assertEquals(BLOCKSIZE * REPL * 2, usage.get(Quota.DISKSPACE));
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test the rename undo when removing dst node fails.
 *
 * /test/dir2 gets a namespace quota of 4 so that the OVERWRITE rename of
 * /test/dir1/foo onto /test/dir2/subdir/subdir throws a
 * QuotaExceededException while recording the snapshot modification. The
 * test then verifies the undo restored both the source and destination
 * trees, parent pointers, and the (empty) snapshot diff lists.
 */
@Test public void testRenameUndo_6() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path sub_dir2=new Path(dir2,"subdir");
final Path subsub_dir2=new Path(sub_dir2,"subdir");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subsub_dir2);
final Path foo=new Path(dir1,"foo");
hdfs.mkdirs(foo);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Quota of 4 makes the snapshot-modification bookkeeping fail mid-rename.
hdfs.setQuota(dir2,4,Long.MAX_VALUE - 1);
try {
hdfs.rename(foo,subsub_dir2,Rename.OVERWRITE);
fail("Expect QuotaExceedException");
}
catch ( QuotaExceededException e) {
String msg="Failed to record modification for snapshot: " + "The NameSpace quota (directories and files)" + " is exceeded: quota=4 file count=5";
GenericTestUtils.assertExceptionContains(msg,e);
}
assertTrue(hdfs.exists(foo));
// Source side after undo: foo is still dir1's only child, and dir1's s1
// diff recorded no children changes.
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode fooNode=childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
assertSame(dir1Node,fooNode.getParent());
List diffList=dir1Node.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// Destination side after undo: usage and structure are unchanged, and
// the subdir chain's parent pointers are intact.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(4,counts.get(Quota.NAMESPACE));
assertEquals(0,counts.get(Quota.DISKSPACE));
childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode subdir2Node=childrenList.get(0);
assertTrue(subdir2Node.asDirectory().isWithSnapshot());
assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(sub_dir2.toString()));
// The inner subdir stays a plain INodeDirectory (no snapshot feature).
INode subsubdir2Node=fsdir.getINode4Write(subsub_dir2.toString());
assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
assertSame(subdir2Node,subsubdir2Node.getParent());
// dir2's s2 diff must be empty; the spied-on subdir has no diffs at all.
diffList=(dir2Node).getDiffs().asList();
assertEquals(1,diffList.size());
diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
diffList=subdir2Node.asDirectory().getDiffs().asList();
assertEquals(0,diffList.size());
}
InternalCallVerifierBooleanVerifierIdentityVerifierHybridVerifier
/**
 * This test demonstrates that {@link INodeDirectory#removeChild(INode,Snapshot)}
 * and {@link INodeDirectory#addChild(INode,boolean,Snapshot)} should use
 * {@link INode#isInLatestSnapshot(Snapshot)} to check if the added/removed
 * child should be recorded in snapshots.
 */
@Test public void testRenameDirAndDeleteSnapshot_5() throws Exception {
final Path dir1=new Path("/dir1");
final Path dir2=new Path("/dir2");
final Path dir3=new Path("/dir3");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
hdfs.mkdirs(dir3);
final Path foo=new Path(dir1,"foo");
hdfs.mkdirs(foo);
// s1 is taken before bar exists and deleted right after bar is created,
// so foo/bar is not captured by any snapshot of /dir1.
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.deleteSnapshot(dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Move foo into /dir2 (covered by s2), then move bar out to /dir3 and
// delete foo2 -- bar must not be dragged down with the deleted dir.
final Path foo2=new Path(dir2,foo.getName());
hdfs.rename(foo,foo2);
final Path bar2=new Path(dir2,"foo/bar");
final Path bar3=new Path(dir3,"bar");
hdfs.rename(bar2,bar3);
hdfs.delete(foo2,true);
// bar survives under /dir3 as a plain file with /dir3 as its parent.
assertTrue(hdfs.exists(bar3));
INodeFile barNode=(INodeFile)fsdir.getINode4Write(bar3.toString());
assertSame(fsdir.getINode4Write(dir3.toString()),barNode.getParent());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test rename where the src/dst directories are both snapshottable
 * directories without snapshots. In such case we need to update the
 * snapshottable dir list in SnapshotManager.
 */
@Test(timeout=60000) public void testRenameAndUpdateSnapshottableDirs() throws Exception {
  final Path srcRoot = new Path("/dir1");
  final Path dstRoot = new Path("/dir2");
  final Path src = new Path(srcRoot, "foo");
  final Path dst = new Path(dstRoot, "bar");
  hdfs.mkdirs(src);
  hdfs.mkdirs(dst);
  hdfs.allowSnapshot(src);
  SnapshotTestHelper.createSnapshot(hdfs, dst, snap1);
  assertEquals(2, fsn.getSnapshottableDirListing().length);
  INodeDirectory srcNode = fsdir.getINode4Write(src.toString()).asDirectory();
  long srcId = srcNode.getId();
  // Overwriting a snapshottable dir that still has snapshots is rejected.
  try {
    hdfs.rename(src, dst, Rename.OVERWRITE);
    fail("Expect exception since " + dst + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        dst.toString() + " is snapshottable and already has snapshots", e);
  }
  // Once the snapshot is removed the overwrite succeeds, and the listing
  // must track the surviving snapshottable dir under its new path.
  hdfs.deleteSnapshot(dst, snap1);
  hdfs.rename(src, dst, Rename.OVERWRITE);
  SnapshottableDirectoryStatus[] listing = fsn.getSnapshottableDirListing();
  assertEquals(1, listing.length);
  assertEquals(dst, listing[0].getFullPath());
  assertEquals(srcId, listing[0].getDirStatus().getFileId());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test rename to an invalid name (xxx/.snapshot).
 *
 * Renaming /foo/bar to the reserved name /foo/.snapshot must be
 * rejected. The test verifies nothing leaked into the root snapshot's
 * diff lists, then saves the namespace and restarts the cluster to
 * confirm the fsimage is still consistent after the failed rename.
 */
@Test public void testRenameUndo_7() throws Exception {
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,root,snap1);
// ".snapshot" is a reserved name; the rename must throw.
final Path invalid=new Path(foo,HdfsConstants.DOT_SNAPSHOT_DIR);
try {
hdfs.rename(bar,invalid);
fail("expect exception since invalid name is used for rename");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name",e);
}
// After the failed rename, foo must still have bar as its only child
// and the s1 diff must record no children changes.
INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
assertEquals(s1.getId(),diff.getSnapshotId());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
// bar is untouched: same node in foo's child list, parent intact, and a
// single file diff belonging to s1.
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
assertSame(barNode,children.get(0));
assertSame(fooNode,barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
FileDiff barDiff=barDiffList.get(0);
assertEquals(s1.getId(),barDiff.getSnapshotId());
// Save the namespace and restart from the saved image to make sure the
// failed rename left a loadable, consistent fsimage.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPL).build();
cluster.waitActive();
restartClusterAndCheckImage(true);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Rename a single file across snapshottable dirs.
 *
 * The file starts under /dir2 (captured by snapshot s2) and is moved to
 * /dir1 after snapshot s3 of /dir1 was taken: it must show up in s2 with
 * its pre-rename replication, and must not appear in s3 at all.
 */
@Test(timeout=60000) public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path dstRoot = new Path("/dir1");
  final Path srcRoot = new Path("/dir2");
  hdfs.mkdirs(dstRoot);
  hdfs.mkdirs(srcRoot);
  final Path srcFile = new Path(srcRoot, "foo");
  DFSTestUtil.createFile(hdfs, srcFile, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, dstRoot, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, srcRoot, "s2");
  hdfs.createSnapshot(dstRoot, "s3");
  final Path dstFile = new Path(dstRoot, "foo");
  hdfs.rename(srcFile, dstFile);
  hdfs.setReplication(dstFile, REPL_1);
  // s2 preserves the old location with the replication before the change.
  final Path fooInS2 = SnapshotTestHelper.getSnapshotPath(srcRoot, "s2", "foo");
  assertTrue(hdfs.exists(fooInS2));
  FileStatus snapStatus = hdfs.getFileStatus(fooInS2);
  assertEquals(REPL, snapStatus.getReplication());
  // s3 predates the rename, so the file must not be visible there.
  final Path fooInS3 = SnapshotTestHelper.getSnapshotPath(dstRoot, "s3", "foo");
  assertFalse(hdfs.exists(fooInS3));
  // The moved file's diff list must end at snapshot s2.
  INodeDirectory srcRootNode = fsdir.getINode(srcRoot.toString()).asDirectory();
  Snapshot snapS2 = srcRootNode.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile movedFile = fsdir.getINode(dstFile.toString()).asFile();
  assertEquals(snapS2.getId(), movedFile.getDiffs().getLastSnapshotId());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * After the following steps:
 *
 * 1. Take snapshot s1 on /dir1 at time t1.
 * 2. Take snapshot s2 on /dir2 at time t2.
 * 3. Modify the subtree of /dir2/foo/ to make it a dir with snapshots.
 * 4. Take snapshot s3 on /dir1 at time t3.
 * 5. Rename /dir2/foo/ to /dir1/foo/.
 *
 * When changes happening on foo, the diff should be recorded in snapshot s2.
 */
@Test(timeout=60000) public void testRenameDirAcrossSnapshottableDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
final Path bar2=new Path(foo,"bar2");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Modify the subtree after s2 so foo carries snapshot diffs.
hdfs.setReplication(bar2,REPL_1);
hdfs.delete(bar,true);
hdfs.createSnapshot(sdir1,"s3");
// Move foo from /dir2 into /dir1 after s3 was taken.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
// bar was deleted after s2, so its s2 copy must still be visible.
final Path snapshotBar=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(snapshotBar));
final Path newBar2=new Path(newfoo,"bar2");
assertTrue(hdfs.exists(newBar2));
// Deleting the current bar2 must not touch its s2 copy, which still
// reports the replication from before the setReplication call.
hdfs.delete(newBar2,true);
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
FileStatus status=hdfs.getFileStatus(bar2_s2);
assertEquals(REPL,status.getReplication());
// s3 was taken before the rename brought foo into /dir1, so bar2 must
// not appear under s3.
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * After rename, delete the snapshot in src
 */
@Test public void testRenameDirAndDeleteSnapshot_2() throws Exception {
// /dir2/foo/bar is the rename source; /dir1 is the destination root.
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s3");
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
// Verify the rename survives a NameNode restart / fsimage round trip.
restartClusterAndCheckImage(true);
final Path bar2=new Path(newfoo,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir1,"s4");
hdfs.delete(newfoo,true);
// Both children stay reachable via s4, which was taken before the delete.
final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar");
assertTrue(hdfs.exists(bar_s4));
hdfs.deleteSnapshot(sdir1,"s4");
restartClusterAndCheckImage(true);
// s3 belongs to dir2 (the rename source), not dir1.
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar");
assertTrue(hdfs.exists(bar_s3));
// bar2 was created after the rename, so no s3 view contains it.
Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
hdfs.deleteSnapshot(sdir2,"s3");
final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(bar_s2));
// Inspect the namesystem directly: foo seen through s2 should be a
// WithName reference whose referred inode keeps exactly one diff,
// recorded in snapshot s2.
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
INodeReference fooRef=fsdir.getINode(foo_s2.toString()).asReference();
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount fooWC=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWC.getReferenceCount());
INodeDirectory fooDir=fooWC.getReferredINode().asDirectory();
List diffs=fooDir.getDiffs().asList();
assertEquals(1,diffs.size());
assertEquals(s2.getId(),diffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(bar_s2));
restartClusterAndCheckImage(true);
// Quota checks: namespace usage shrinks as the snapshots are removed;
// no disk space remains consumed.
Quota.Counts q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests the snapshot diff report when several renames, including one with
 * the OVERWRITE option, happen between two snapshots of the root.
 */
@Test public void testRenameWithOverWrite() throws Exception {
// Layout before s0: /foo/{file1,file2,file3} and empty /bar.
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path file1InFoo=new Path(foo,"file1");
final Path file2InFoo=new Path(foo,"file2");
final Path file3InFoo=new Path(foo,"file3");
DFSTestUtil.createFile(hdfs,file1InFoo,1L,REPL,SEED);
DFSTestUtil.createFile(hdfs,file2InFoo,1L,REPL,SEED);
DFSTestUtil.createFile(hdfs,file3InFoo,1L,REPL,SEED);
final Path bar=new Path(root,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,root,"s0");
// Between s0 and s1: move file1 into bar, rename bar to newDir,
// move file2 across, then overwrite newDir/file1 with file3.
final Path fileInBar=new Path(bar,"file1");
hdfs.rename(file1InFoo,fileInBar);
final Path newDir=new Path(root,"newDir");
hdfs.rename(bar,newDir);
final Path file2InNewDir=new Path(newDir,"file2");
hdfs.rename(file2InFoo,file2InNewDir);
final Path file1InNewDir=new Path(newDir,"file1");
hdfs.rename(file3InFoo,file1InNewDir,Rename.OVERWRITE);
SnapshotTestHelper.createSnapshot(hdfs,root,"s1");
SnapshotDiffReport report=hdfs.getSnapshotDiffReport(root,"s0","s1");
LOG.info("DiffList is \n\"" + report.toString() + "\"");
List entries=report.getDiffList();
// Expect 7 entries: 3 MODIFYs on touched dirs, 1 DELETE for the
// overwritten file1, and 3 RENAMEs.
assertEquals(7,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,foo.getName(),null));
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,bar.getName(),null));
assertTrue(existsInDiffReport(entries,DiffType.DELETE,"foo/file1",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"bar","newDir"));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file2","newDir/file2"));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file3","newDir/file1"));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test renaming a file and then delete snapshots.
 */
@Test public void testRenameFileAndDeleteSnapshot() throws Exception {
// foo starts under /dir2 and is renamed into /dir1 after s1/s2/s3.
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
// Replication changes bracket s4 so each snapshot records a distinct value.
hdfs.setReplication(newfoo,REPL_1);
hdfs.createSnapshot(sdir1,"s4");
hdfs.setReplication(newfoo,REPL_2);
FileStatus status=hdfs.getFileStatus(newfoo);
assertEquals(REPL_2,status.getReplication());
// s4 preserves the replication value that was current when it was taken.
final Path foo_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo");
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
hdfs.createSnapshot(sdir1,"s5");
final Path foo_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo");
status=hdfs.getFileStatus(foo_s5);
assertEquals(REPL_2,status.getReplication());
// Deleting the most recent snapshot must not disturb older snapshot views.
hdfs.deleteSnapshot(sdir1,"s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(foo_s5));
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
hdfs.deleteSnapshot(sdir1,"s4");
assertFalse(hdfs.exists(foo_s4));
// s3 was taken on dir1 before the rename, so foo is absent from it;
// dir2 has no snapshot named s3 at this path either.
Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
// s2 still shows foo under dir2 with its original replication.
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
assertTrue(hdfs.exists(foo_s2));
status=hdfs.getFileStatus(foo_s2);
assertEquals(REPL,status.getReplication());
// Only one diff should remain on the file inode, recorded in s2.
INodeFile snode=fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1,snode.getDiffs().asList().size());
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),snode.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(foo_s2));
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Renames a directory inside a snapshottable directory and checks the
 * snapshot diff report (snapshot vs. current state) shows the rename.
 */
@Test(timeout=60000) public void testRenameDirectoryInSnapshot() throws Exception {
final Path sub2=new Path(sub1,"sub2");
final Path sub3=new Path(sub1,"sub3");
final Path sub2file1=new Path(sub2,"sub2file1");
final String sub1snap1="sub1snap1";
hdfs.mkdirs(sub1);
hdfs.mkdirs(sub2);
DFSTestUtil.createFile(hdfs,sub2file1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sub1,sub1snap1);
// Rename sub2 -> sub3 after the snapshot was taken.
hdfs.rename(sub2,sub3);
// Empty "to" snapshot name means: diff against the current state.
SnapshotDiffReport diffReport=hdfs.getSnapshotDiffReport(sub1,sub1snap1,"");
LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
List entries=diffReport.getDiffList();
// Expect exactly a MODIFY on the root of the diff and the RENAME itself.
assertEquals(2,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,sub2.getName(),sub3.getName()));
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test deleting the earliest (first) snapshot. In this simplest scenario, the
 * snapshots are taken on the same directory, and we do not need to combine
 * snapshot diffs.
 */
@Test(timeout=300000) public void testDeleteEarliestSnapshot1() throws Exception {
Path file0=new Path(sub,"file0");
Path file1=new Path(sub,"file1");
DFSTestUtil.createFile(hdfs,file0,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
String snapshotName="s1";
// Deleting a snapshot on a non-snapshottable directory must fail.
try {
hdfs.deleteSnapshot(sub,snapshotName);
fail("SnapshotException expected: " + sub.toString() + " is not snapshottable yet");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + sub,e);
}
hdfs.allowSnapshot(sub);
// Deleting a snapshot that does not exist must also fail.
try {
hdfs.deleteSnapshot(sub,snapshotName);
fail("SnapshotException expected: snapshot " + snapshotName + " does not exist for "+ sub.toString());
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Cannot delete snapshot " + snapshotName + " from path "+ sub.toString()+ ": the snapshot does not exist.",e);
}
// Create/delete/re-create the snapshot, checking quota usage each time.
SnapshotTestHelper.createSnapshot(hdfs,sub,snapshotName);
checkQuotaUsageComputation(sub,4,BLOCKSIZE * REPLICATION * 2);
hdfs.deleteSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,3,BLOCKSIZE * REPLICATION * 2);
hdfs.createSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,4,BLOCKSIZE * REPLICATION * 2);
Path newFile=new Path(sub,"newFile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
String snapshotName2="s2";
hdfs.createSnapshot(sub,snapshotName2);
checkQuotaUsageComputation(sub,6,BLOCKSIZE * REPLICATION * 3);
// Deleting the earliest snapshot (s1) must not change what s2 reports.
Path ss=SnapshotTestHelper.getSnapshotPath(sub,snapshotName2,"newFile");
FileStatus statusBeforeDeletion=hdfs.getFileStatus(ss);
hdfs.deleteSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,5,BLOCKSIZE * REPLICATION * 3);
FileStatus statusAfterDeletion=hdfs.getFileStatus(ss);
System.out.println("Before deletion: " + statusBeforeDeletion.toString() + "\n"+ "After deletion: "+ statusAfterDeletion.toString());
assertEquals(statusBeforeDeletion.toString(),statusAfterDeletion.toString());
}
UtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test deleting the earliest (first) snapshot. In this more complicated
 * scenario, the snapshots are taken across directories.
 *
 * The test covers the following scenarios:
 * 1. delete the first diff in the diff list of a directory
 * 2. delete the first diff in the diff list of a file
 *
 * Also, the recursive cleanTree process should cover both INodeFile and
 * INodeDirectory.
 */
@Test(timeout=300000) public void testDeleteEarliestSnapshot2() throws Exception {
// Tree: dir/noChangeDir/{noChangeFile,metaChangeFile,metaChangeDir/toDeleteFile}
Path noChangeDir=new Path(sub,"noChangeDir");
Path noChangeFile=new Path(noChangeDir,"noChangeFile");
Path metaChangeFile=new Path(noChangeDir,"metaChangeFile");
Path metaChangeDir=new Path(noChangeDir,"metaChangeDir");
Path toDeleteFile=new Path(metaChangeDir,"toDeleteFile");
DFSTestUtil.createFile(hdfs,noChangeFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,metaChangeFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,toDeleteFile,BLOCKSIZE,REPLICATION,seed);
// Remember toDeleteFile's blocks so we can verify they are released later.
final INodeFile toDeleteFileNode=TestSnapshotBlocksMap.assertBlockCollection(toDeleteFile.toString(),1,fsdir,blockmanager);
BlockInfo[] blocks=toDeleteFileNode.getBlocks();
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
checkQuotaUsageComputation(dir,8,3 * BLOCKSIZE * REPLICATION);
// Delete a file and make metadata-only changes after s0.
hdfs.delete(toDeleteFile,true);
checkQuotaUsageComputation(dir,10,3 * BLOCKSIZE * REPLICATION);
hdfs.setReplication(metaChangeFile,REPLICATION_1);
hdfs.setOwner(metaChangeDir,"unknown","unknown");
checkQuotaUsageComputation(dir,11,3 * BLOCKSIZE * REPLICATION);
hdfs.createSnapshot(dir,"s1");
checkQuotaUsageComputation(dir,12,3 * BLOCKSIZE * REPLICATION);
// Delete the earliest snapshot; toDeleteFile's blocks should now be freed.
hdfs.deleteSnapshot(dir,"s0");
checkQuotaUsageComputation(dir,7,2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// s0 must be gone; only s1's diff should remain on dir.
final INodeDirectory dirNode=fsdir.getINode(dir.toString()).asDirectory();
Snapshot snapshot0=dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertNull(snapshot0);
Snapshot snapshot1=dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
DirectoryDiffList diffList=dirNode.getDiffs();
assertEquals(1,diffList.asList().size());
assertEquals(snapshot1.getId(),diffList.getLast().getSnapshotId());
diffList=fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
assertEquals(0,diffList.asList().size());
// Unchanged inodes should be plain INodeDirectory/INodeFile instances.
final INodeDirectory noChangeDirNode=(INodeDirectory)fsdir.getINode(noChangeDir.toString());
assertEquals(INodeDirectory.class,noChangeDirNode.getClass());
final INodeFile noChangeFileNode=(INodeFile)fsdir.getINode(noChangeFile.toString());
assertEquals(INodeFile.class,noChangeFileNode.getClass());
TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(),1,fsdir,blockmanager);
// Metadata changes made after s0 must still be in effect.
FileStatus status=hdfs.getFileStatus(metaChangeDir);
assertEquals("unknown",status.getOwner());
assertEquals("unknown",status.getGroup());
status=hdfs.getFileStatus(metaChangeFile);
assertEquals(REPLICATION_1,status.getReplication());
TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(),1,fsdir,blockmanager);
// The deleted file must be gone from both the current tree and s0's path.
try {
status=hdfs.getFileStatus(toDeleteFile);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFile.toString(),e);
}
final Path toDeleteFileInSnapshot=SnapshotTestHelper.getSnapshotPath(dir,"s0",toDeleteFile.toString().substring(dir.toString().length()));
try {
status=hdfs.getFileStatus(toDeleteFileInSnapshot);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFileInSnapshot.toString(),e);
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test deleting a directory which is a descendant of a snapshottable
 * directory. In the test we need to cover the following cases:
 *
 * 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
 * 2. Delete current INodeFile/INodeDirectory while snapshots have been taken
 * on ancestor(s).
 * 3. Delete current INodeFileWithSnapshot.
 * 4. Delete current INodeDirectoryWithSnapshot.
 */
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that, if automatic HA is enabled, none of the mutative operations
 * will succeed, unless the -forcemanual flag is specified.
 * @throws Exception
 */
@Test public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf=getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,true);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,getFencerTrueCommand());
tool.setConf(conf);
// Without -forcemanual both transitions are refused and never reach RPC.
assertEquals(-1,runTool("-transitionToActive","nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
assertEquals(-1,runTool("-transitionToStandby","nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
Mockito.verify(mockProtocol,Mockito.never()).transitionToActive(anyReqInfo());
Mockito.verify(mockProtocol,Mockito.never()).transitionToStandby(anyReqInfo());
// With -forcemanual (and the interactive confirmation) both succeed.
setupConfirmationOnSystemIn();
assertEquals(0,runTool("-transitionToActive","-forcemanual","nn1"));
setupConfirmationOnSystemIn();
assertEquals(0,runTool("-transitionToStandby","-forcemanual","nn1"));
Mockito.verify(mockProtocol,Mockito.times(1)).transitionToActive(reqInfoCaptor.capture());
Mockito.verify(mockProtocol,Mockito.times(1)).transitionToStandby(reqInfoCaptor.capture());
// Forced manual requests must be tagged with the FORCED request source.
for ( StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
assertEquals(RequestSource.REQUEST_BY_USER_FORCED,ri.getSource());
}
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * A failover to a NameNode that is in safemode must be rejected, since a
 * safemode NameNode is not ready to become active.
 */
@Test public void testTryFailoverToSafeMode() throws Exception {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,TestDFSHAAdmin.getFencerTrueCommand());
tool.setConf(conf);
// Put the failover target (nn1, NameNode 0) into safemode first.
NameNodeAdapter.enterSafeMode(cluster.getNameNode(0),false);
assertEquals(-1,runTool("-failover","nn2","nn1"));
assertTrue("Bad output: " + errOutput,errOutput.contains("is not ready to become active: " + "The NameNode is in safemode"));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the OfflineEditsViewer
 */
@Test public void testGenerated() throws IOException {
// Round-trip freshly generated edits: binary -> XML -> binary, then
// verify the reparsed binary matches the original.
String edits=nnHelper.generateEdits();
LOG.info("Generated edits=" + edits);
String editsParsedXml=folder.newFile("editsParsed.xml").getAbsolutePath();
String editsReparsed=folder.newFile("editsParsed").getAbsolutePath();
assertEquals(0,runOev(edits,editsParsedXml,"xml",false));
assertEquals(0,runOev(editsParsedXml,editsReparsed,"binary",false));
assertTrue("Edits " + edits + " should have all op codes",hasAllOpCodes(edits));
LOG.info("Comparing generated file " + editsReparsed + " with reference file "+ edits);
assertTrue("Generated edits and reparsed (bin to XML to bin) should be same",filesEqualIgnoreTrailingZeros(edits,editsReparsed));
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Round-trips the checked-in reference edits file (binary -> XML -> binary)
 * and compares both the XML and the reparsed binary against the references.
 */
@Test public void testStored() throws IOException {
final String cacheDir=System.getProperty("test.cache.data","build/test/cache");
String editsStored=cacheDir + "/editsStored";
String editsStoredParsedXml=cacheDir + "/editsStoredParsed.xml";
String editsStoredReparsed=cacheDir + "/editsStoredReparsed";
String editsStoredXml=cacheDir + "/editsStored.xml";
assertEquals(0,runOev(editsStored,editsStoredParsedXml,"xml",false));
assertEquals(0,runOev(editsStoredParsedXml,editsStoredReparsed,"binary",false));
assertTrue("Edits " + editsStored + " should have all op codes",hasAllOpCodes(editsStored));
assertTrue("Reference XML edits and parsed to XML should be same",FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),new File(editsStoredParsedXml),"UTF-8"));
assertTrue("Reference edits and reparsed (bin to XML to bin) should be same",filesEqualIgnoreTrailingZeros(editsStored,editsStoredReparsed));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test case where there IS an existing file at the destination: the old
 * contents must remain visible until the atomic stream is closed.
 * (Original comment incorrectly said "no existing file".)
 */
@Test public void testOverwriteFile() throws IOException {
assertTrue("Creating empty dst file",DST_FILE.createNewFile());
OutputStream fos=new AtomicFileOutputStream(DST_FILE);
assertTrue("Empty file still exists",DST_FILE.exists());
fos.write(TEST_STRING.getBytes());
fos.flush();
// Before close() the destination still shows the old (empty) contents.
assertEquals("",DFSTestUtil.readFile(DST_FILE));
fos.close();
// After close() the new contents atomically replace the old file.
String readBackData=DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING,readBackData);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test case where there is no existing file
 */
@Test public void testWriteNewFile() throws IOException {
OutputStream fos=new AtomicFileOutputStream(DST_FILE);
// Nothing appears at the destination until the stream is closed.
assertFalse(DST_FILE.exists());
fos.write(TEST_STRING.getBytes());
fos.flush();
assertFalse(DST_FILE.exists());
fos.close();
// close() atomically publishes the file with the written contents.
assertTrue(DST_FILE.exists());
String readBackData=DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING,readBackData);
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test case where the flush() fails at close time - make sure
 * that we clean up after ourselves and don't touch any
 * existing file at the destination
 */
@Test public void testFailToFlush() throws IOException {
// Seed the destination with known contents that must survive the failure.
FileOutputStream fos=new FileOutputStream(DST_FILE);
fos.write(TEST_STRING_2.getBytes());
fos.close();
OutputStream failingStream=createFailingStream();
failingStream.write(TEST_STRING.getBytes());
// close() must propagate the flush failure...
try {
failingStream.close();
fail("Close didn't throw exception");
}
catch ( IOException ioe) {
// expected: the injected flush failure surfaces here
}
// ...and the original destination contents must be untouched,
// with no leftover temporary file in the directory.
assertEquals(TEST_STRING_2,DFSTestUtil.readFile(DST_FILE));
assertEquals("Temporary file should have been cleaned up",DST_FILE.getName(),Joiner.on(",").join(TEST_DIR.list()));
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Reading into a byte array past the stream's declared size must raise EOF.
 */
@Test public void testReadArrayNotEnough() throws IOException {
  // Stream claims 5 bytes but the underlying source only has 2.
  final ExactSizeInputStream stream=new ExactSizeInputStream(byteStream("he"),5);
  final byte[] buffer=new byte[10];
  assertEquals(2,stream.read(buffer,0,5));
  boolean sawEof=false;
  try {
    stream.read(buffer,2,3);
  }
  catch ( EOFException expected) {
    sawEof=true;
  }
  if (!sawEof) {
    fail("Read buf when should be out of data");
  }
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Skipping beyond the stream's declared size must raise EOF.
 */
@Test public void testSkipNotEnough() throws IOException {
  // Only 2 real bytes, though the stream promises 5.
  final ExactSizeInputStream stream=new ExactSizeInputStream(byteStream("he"),5);
  assertEquals(2,stream.skip(3));
  boolean sawEof=false;
  try {
    stream.skip(1);
  }
  catch ( EOFException expected) {
    sawEof=true;
  }
  if (!sawEof) {
    fail("Skip when should be out of data");
  }
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * ExactSizeInputStream does not support mark/reset; mark() must be rejected.
 */
@Test public void testMark() throws IOException {
  final ExactSizeInputStream stream=new ExactSizeInputStream(byteStream("he"),5);
  assertFalse(stream.markSupported());
  boolean rejected=false;
  try {
    stream.mark(1);
  }
  catch ( UnsupportedOperationException expected) {
    rejected=true;
  }
  if (!rejected) {
    fail("Mark should not succeed");
  }
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Single-byte reads past the stream's declared size must raise EOF.
 */
@Test public void testReadNotEnough() throws IOException {
  final ExactSizeInputStream stream=new ExactSizeInputStream(byteStream("he"),5);
  assertEquals(2,stream.available());
  // Consume both bytes that really exist.
  assertEquals((int)'h',stream.read());
  assertEquals((int)'e',stream.read());
  boolean sawEof=false;
  try {
    stream.read();
  }
  catch ( EOFException expected) {
    sawEof=true;
  }
  if (!sawEof) {
    fail("Read when should be out of data");
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Adding a single element yields a non-empty set of size one whose
 * iterator visits exactly that element.
 */
@Test public void testOneElementBasic(){
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertFalse(set.isEmpty());
  assertEquals(1,set.size());
  final Iterator it=set.iterator();
  assertTrue(it.hasNext());
  assertEquals(list.get(0),it.next());
  assertFalse(it.hasNext());
  LOG.info("Test one element basic - DONE");
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Adds every element of the reference list to the set and verifies size,
 * membership, duplicate rejection, and that iteration visits each element.
 */
@Test public void testMultiBasic(){
LOG.info("Test multi element basic");
// First insertion of each element must succeed.
for ( Integer i : list) {
assertTrue(set.add(i));
}
assertEquals(list.size(),set.size());
for ( Integer i : list) {
assertTrue(set.contains(i));
}
// Re-adding an existing element must be rejected...
for ( Integer i : list) {
assertFalse(set.add(i));
}
// ...without disturbing membership.
for ( Integer i : list) {
assertTrue(set.contains(i));
}
// Iteration yields exactly list.size() elements, each from the list.
Iterator iter=set.iterator();
int num=0;
while (iter.hasNext()) {
Integer next=iter.next();
assertNotNull(next);
assertTrue(list.contains(next));
num++;
}
assertEquals(list.size(),num);
LOG.info("Test multi element basic - DONE");
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Adds every element of the reference list to the set and verifies size,
 * membership, duplicate rejection, and that iteration preserves the
 * insertion order of the list.
 */
@Test public void testMultiBasic(){
LOG.info("Test multi element basic");
// First insertion of each element must succeed.
for ( Integer i : list) {
assertTrue(set.add(i));
}
assertEquals(list.size(),set.size());
for ( Integer i : list) {
assertTrue(set.contains(i));
}
// Re-adding an existing element must be rejected...
for ( Integer i : list) {
assertFalse(set.add(i));
}
// ...without disturbing membership.
for ( Integer i : list) {
assertTrue(set.contains(i));
}
// Iteration order must match insertion order exactly.
Iterator iter=set.iterator();
int num=0;
while (iter.hasNext()) {
assertEquals(list.get(num++),iter.next());
}
assertEquals(list.size(),num);
LOG.info("Test multi element basic - DONE");
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that the set's bookmark advances past an element that is
 * removed while the bookmark points at it.
 */
@Test(timeout=60000) public void testBookmarkAdvancesOnRemoveOfSameElement(){
  LOG.info("Test that the bookmark advances if we remove its element.");
  assertTrue(set.add(list.get(0)));
  assertTrue(set.add(list.get(1)));
  assertTrue(set.add(list.get(2)));
  Iterator it=set.getBookmark();
  // NOTE: JUnit convention is assertEquals(expected, actual); the original
  // passed them reversed, producing misleading failure messages.
  assertEquals(list.get(0),it.next());
  // Remove the element the bookmark currently points at (element 1).
  set.remove(list.get(1));
  // The bookmark must now yield the following element.
  it=set.getBookmark();
  assertEquals(list.get(2),it.next());
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * pollFirst() returns the single element, then null once the set is empty.
 */
@Test public void testPollOneElement(){
LOG.info("Test poll one element");
set.add(list.get(0));
assertEquals(list.get(0),set.pollFirst());
assertNull(set.pollFirst());
LOG.info("Test poll one element - DONE");
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Polling until exhaustion leaves the set empty: zero size, no membership,
 * and an iterator with no elements.
 */
@Test public void testPollAll(){
LOG.info("Test poll all");
for ( Integer i : list) {
assertTrue(set.add(i));
}
// Drain the set completely via pollFirst().
while (set.pollFirst() != null) ;
assertEquals(0,set.size());
assertTrue(set.isEmpty());
for (int i=0; i < NUM; i++) {
assertFalse(set.contains(list.get(i)));
}
Iterator iter=set.iterator();
assertFalse(iter.hasNext());
LOG.info("Test poll all - DONE");
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies addAll() plus both toArray() overloads: each returns all NUM
 * elements without consuming them from the set.
 */
@Test public void testOther(){
  LOG.info("Test other");
  assertTrue(set.addAll(list));
  // Typed toArray: every returned element came from the source list.
  Integer[] array=set.toArray(new Integer[0]);
  assertEquals(NUM,array.length);
  for (int i=0; i < array.length; i++) {
    assertTrue(list.contains(array[i]));
  }
  // toArray() must not modify the set.
  assertEquals(NUM,set.size());
  // Untyped toArray: same contents.
  Object[] array2=set.toArray();
  assertEquals(NUM,array2.length);
  for (int i=0; i < array2.length; i++) {
    assertTrue(list.contains(array2[i]));
  }
  // Fixed: closing log message previously said "Test capacity - DONE",
  // which belonged to a different test.
  LOG.info("Test other - DONE");
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * A single inserted element is reported by size/isEmpty and is the only
 * element the iterator produces.
 */
@Test public void testOneElementBasic(){
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertFalse(set.isEmpty());
  assertEquals(1,set.size());
  final Iterator elements=set.iterator();
  assertTrue(elements.hasNext());
  assertEquals(list.get(0),elements.next());
  assertFalse(elements.hasNext());
  LOG.info("Test one element basic - DONE");
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * pollN(k) removes and returns the first k elements in order; asking for
 * more than remain returns only what is left and empties the set.
 */
@Test public void testPollNMulti(){
LOG.info("Test pollN multi");
set.addAll(list);
// First poll: exactly 10 elements, in insertion order.
List l=set.pollN(10);
assertEquals(10,l.size());
for (int i=0; i < 10; i++) {
assertEquals(list.get(i),l.get(i));
}
// Oversized poll: returns only the NUM-10 remaining elements.
l=set.pollN(1000);
assertEquals(NUM - 10,l.size());
for (int i=10; i < NUM; i++) {
assertEquals(list.get(i),l.get(i - 10));
}
assertTrue(set.isEmpty());
assertEquals(0,set.size());
LOG.info("Test pollN multi - DONE");
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Expect connect timeout, because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT) public void testConnectTimeout() throws Exception {
// Fill the server's accept backlog so new connections cannot complete.
consumeConnectionBacklog();
try {
fs.listFiles(new Path("/"),false);
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("connect timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * On the second step of two-step write, expect read timeout accessing the
 * redirect location, because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT) public void testTwoStepWriteReadTimeout() throws Exception {
startSingleTemporaryRedirectResponseThread(false);
OutputStream os=null;
try {
os=fs.create(new Path("/file"));
os.close();
// null it out so cleanup() does not double-close on success
os=null;
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("Read timed out",e.getMessage());
}
 finally {
IOUtils.cleanup(LOG,os);
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Expect read timeout, because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT) public void testReadTimeout() throws Exception {
try {
fs.listFiles(new Path("/"),false);
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("Read timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * After a redirect, expect read timeout accessing the redirect location,
 * because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT) public void testRedirectReadTimeout() throws Exception {
// false = redirect target accepts the connection but never replies.
startSingleTemporaryRedirectResponseThread(false);
try {
fs.getFileChecksum(new Path("/file"));
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("Read timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * After a redirect, expect connect timeout accessing the redirect location,
 * because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT) public void testRedirectConnectTimeout() throws Exception {
// true = redirect target's connection backlog is pre-filled.
startSingleTemporaryRedirectResponseThread(true);
try {
fs.getFileChecksum(new Path("/file"));
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("connect timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * On the second step of two-step write, expect connect timeout accessing the
 * redirect location, because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT) public void testTwoStepWriteConnectTimeout() throws Exception {
startSingleTemporaryRedirectResponseThread(true);
OutputStream os=null;
try {
os=fs.create(new Path("/file"));
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("connect timed out",e.getMessage());
}
 finally {
IOUtils.cleanup(LOG,os);
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Expect read timeout on a URL that requires auth, because the bogus server
 * never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT) public void testAuthUrlReadTimeout() throws Exception {
try {
fs.getDelegationToken("renewer");
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("Read timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Expect connect timeout on a URL that requires auth, because the connection
 * backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT) public void testAuthUrlConnectTimeout() throws Exception {
consumeConnectionBacklog();
try {
fs.getDelegationToken("renewer");
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("connect timed out",e.getMessage());
}
}
BooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test the maximum number of threads cannot be exceeded.
 */
@Test public void testMaxThreads() throws Exception {
  int clientThreads=MAX_THREADS * 10;
  Executor executor=Executors.newFixedThreadPool(clientThreads);
  // ready/start make all clients fire at once; done lets the test thread
  // wait for every worker before checking for failures.
  final CountDownLatch ready=new CountDownLatch(clientThreads);
  final CountDownLatch start=new CountDownLatch(1);
  final CountDownLatch done=new CountDownLatch(clientThreads);
  // The original empty catch silently swallowed assertion failures on the
  // worker threads (and the test returned before they finished), so this
  // test could never fail. Record failures and rethrow on the test thread.
  final java.util.Queue<Throwable> failures=new java.util.concurrent.ConcurrentLinkedQueue<Throwable>();
  for (int i=0; i < clientThreads; i++) {
    executor.execute(new Runnable(){
      @Override public void run(){
        ready.countDown();
        try {
          start.await();
          assertEquals("a:b\nc:d\n",readOutput(new URL(baseUrl,"/echo?a=b&c=d")));
          int serverThreads=server.webServer.getThreadPool().getThreads();
          assertTrue("More threads are started than expected, Server Threads count: " + serverThreads,serverThreads <= MAX_THREADS);
          System.out.println("Number of threads = " + serverThreads + " which is less or equal than the max = "+ MAX_THREADS);
        }
        catch ( Throwable t) {
          failures.add(t);
        }
 finally {
          done.countDown();
        }
      }
    }
    );
  }
  ready.await();
  start.countDown();
  // Wait for all workers to finish, then surface the first failure, if any.
  done.await();
  if (!failures.isEmpty()) {
    throw new AssertionError("Worker thread failed",failures.peek());
  }
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test that a server attribute set while running is readable while the
 * server is alive and is cleared once the server stops.
 * @throws Throwable on failure
 */
@Test public void testWepAppContextAfterServerStop() throws Throwable {
HttpServer2 server=null;
String key="test.attribute.key";
String value="test.attribute.value";
server=createTestServer();
assertNotLive(server);
server.start();
server.setAttribute(key,value);
assertAlive(server);
assertEquals(value,server.getAttribute(key));
stop(server);
// Stopping the server must clear its webapp context attributes.
assertNull("Server context should have cleared",server.getAttribute(key));
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Writes the int[] field via ObjectWritable's old (non-compact) format and
 * verifies the on-wire class label, length, elements, and round-trip value.
 */
@Test public void testOldFormat() throws IOException {
  ObjectWritable.writeObject(out,i,i.getClass(),null);
  in.reset(out.getData(),out.getLength());
  @SuppressWarnings("deprecation") String className=UTF8.readString(in);
  assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not labelled as an array of int",i.getClass().getName(),className);
  int length=in.readInt();
  assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not expected length",i.length,length);
  int[] readValue=new int[length];
  try {
    // Fixed: the loop variable was named "i", shadowing the int[] field "i"
    // under test; renamed to "idx" for clarity.
    for (int idx=0; idx < length; idx++) {
      readValue[idx]=(int)((Integer)ObjectWritable.readObject(in,null));
    }
  }
  catch ( Exception e) {
    fail("The int[] written by ObjectWritable as a non-compact array " + "was corrupted. Failed to correctly read int[] of length " + length + ". Got exception:\n"+ StringUtils.stringifyException(e));
  }
  assertTrue("The int[] written by ObjectWritable as a non-compact array " + "was corrupted.",Arrays.equals(i,readValue));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify how ObjectWritable labels an int[] versus an explicit
 * ArrayPrimitiveWritable on the wire, and that both deserialize intact.
 *
 * Fix: the component-type assertion after reading {@code apwi} used to
 * check {@code apw} — the object that was WRITTEN — which is trivially
 * true and never exercised the deserialized value. It now checks the
 * freshly read {@code apwi}.
 */
@Test @SuppressWarnings("deprecation") public void testObjectLabeling() throws IOException {
// A raw int[] is written as an ArrayPrimitiveWritable.Internal...
ObjectWritable.writeObject(out,i,i.getClass(),null,true);
// ...while a wrapped ArrayPrimitiveWritable keeps its own class label.
ArrayPrimitiveWritable apw=new ArrayPrimitiveWritable(i);
ObjectWritable.writeObject(out,apw,apw.getClass(),null,true);
in.reset(out.getData(),out.getLength());
String className=UTF8.readString(in);
assertEquals("The int[] written by ObjectWritable was not labelled as " + "an ArrayPrimitiveWritable.Internal",ArrayPrimitiveWritable.Internal.class.getName(),className);
ArrayPrimitiveWritable.Internal apwi=new ArrayPrimitiveWritable.Internal();
apwi.readFields(in);
// Assert on the DESERIALIZED object, not the one that was written.
assertEquals("The ArrayPrimitiveWritable.Internal component type was corrupted",int.class,apwi.getComponentType());
assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable.Internal was corrupted",Arrays.equals(i,(int[])(apwi.get())));
// The wrapped APW carries both a declaredClass and a class label.
String declaredClassName=UTF8.readString(in);
assertEquals("The APW written by ObjectWritable was not labelled as " + "declaredClass ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),declaredClassName);
className=UTF8.readString(in);
assertEquals("The APW written by ObjectWritable was not labelled as " + "class ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),className);
ArrayPrimitiveWritable apw2=new ArrayPrimitiveWritable();
apw2.readFields(in);
assertEquals("The ArrayPrimitiveWritable component type was corrupted",int.class,apw2.getComponentType());
assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable was corrupted",Arrays.equals(i,(int[])(apw2.get())));
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Round-trips every array in bigSet twice — once through ObjectWritable
 * and once through ArrayPrimitiveWritable — and checks that component
 * types and contents survive the trip.
 */
@Test public void testMany() throws IOException {
  // Write each source array in both encodings, back to back.
  for ( Object source : bigSet) {
    ObjectWritable.writeObject(out,source,source.getClass(),null,true);
    (new ArrayPrimitiveWritable(source)).write(out);
  }
  in.reset(out.getData(),out.getLength());
  // Read them back in the same order; two result slots per source array.
  for (int pos=0; pos < resultSet.length; pos+=2) {
    resultSet[pos]=ObjectWritable.readObject(in,null);
    ArrayPrimitiveWritable reread=new ArrayPrimitiveWritable();
    reread.readFields(in);
    resultSet[pos + 1]=reread.get();
  }
  assertEquals(expectedResultSet.length,resultSet.length);
  for (int x=0; x < resultSet.length; x++) {
    assertEquals("ComponentType of array " + x,expectedResultSet[x].getClass().getComponentType(),resultSet[x].getClass().getComponentType());
  }
  assertTrue("In and Out arrays didn't match values",Arrays.deepEquals(expectedResultSet,resultSet));
}
InternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Exercises the zero-copy BytesWritable constructor and set method,
 * added so callers can hand BytesWritable a backing buffer plus a
 * length instead of forcing a defensive copy.
 */
@Test public void testZeroCopy(){
  byte[] raw="brock".getBytes();
  // Zero-copy construction must share — not copy — the caller's array.
  BytesWritable zeroBuf=new BytesWritable(raw,raw.length);
  // The classic constructor copies the bytes.
  BytesWritable copyBuf=new BytesWritable(raw);
  assertTrue("copy took place, backing array != array passed to constructor",raw == zeroBuf.getBytes());
  assertTrue("length of BW should backing byte array",zeroBuf.getLength() == raw.length);
  // Despite different backing strategies, the two must be indistinguishable.
  assertEquals("objects with same backing array should be equal",zeroBuf,copyBuf);
  assertEquals("string repr of objects with same backing array should be equal",zeroBuf.toString(),copyBuf.toString());
  assertTrue("compare order objects with same backing array should be equal",zeroBuf.compareTo(copyBuf) == 0);
  assertTrue("hash of objects with same backing array should be equal",zeroBuf.hashCode() == copyBuf.hashCode());
  // Re-point the zero-copy instance at a larger buffer, then back again.
  byte[] bigger=new byte[raw.length * 5];
  zeroBuf.set(bigger,0,bigger.length);
  zeroBuf.set(raw,0,raw.length);
  assertEquals("buffer created with (array, len) has bad contents",zeroBuf,copyBuf);
  assertTrue("buffer created with (array, len) has bad length",zeroBuf.getLength() == copyBuf.getLength());
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * test {@code MapFile.Reader.getClosest()} method on the current API.
 * Keys are the strings "1","11","21",...,"91"; getClosest compares them
 * lexicographically as Text.
 *
 * Fix: the check after {@code getClosest(explicitKey, value)} used to
 * assert {@code explicitKey} equals itself (always true); it now asserts
 * on {@code closest}, the value actually returned.
 */
@Test public void testGetClosestOnCurrentApi() throws Exception {
  final String TEST_PREFIX="testGetClosestOnCurrentApi.mapfile";
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    writer=createWriter(TEST_PREFIX,Text.class,Text.class);
    int FIRST_KEY=1;
    for (int i=FIRST_KEY; i < 100; i+=10) {
      Text t=new Text(Integer.toString(i));
      writer.append(t,t);
    }
    writer.close();
    reader=createReader(TEST_PREFIX,Text.class);
    Text key=new Text("55");
    Text value=new Text();
    // Next key at or after "55" (lexicographic) is "61".
    Text closest=(Text)reader.getClosest(key,value);
    assertEquals(new Text("61"),closest);
    // With before=true, the closest key at or before "55" is "51".
    closest=(Text)reader.getClosest(key,value,true);
    assertEquals(new Text("51"),closest);
    // An exact match must return the key itself.
    final Text explicitKey=new Text("21");
    closest=(Text)reader.getClosest(explicitKey,value);
    assertEquals(new Text("21"),closest);
    // Before the first key: the first key is returned.
    key=new Text("00");
    closest=(Text)reader.getClosest(key,value);
    assertEquals(FIRST_KEY,Integer.parseInt(closest.toString()));
    // Past the last key: null going forward, last key going backward.
    key=new Text("92");
    closest=(Text)reader.getClosest(key,value);
    assertNull("Not null key in testGetClosestWithNewCode",closest);
    closest=(Text)reader.getClosest(key,value,true);
    assertEquals(new Text("91"),closest);
  }
  finally {
    IOUtils.cleanup(null,writer,reader);
  }
}
BranchVerifierTestInitializerUtilityVerifierHybridVerifier
/** Start each test from an empty TEST_DIR on the local filesystem. */
@Before public void setup() throws Exception {
  LocalFileSystem localFs=FileSystem.getLocal(conf);
  if (localFs.exists(TEST_DIR)) {
    if (!localFs.delete(TEST_DIR,true)) {
      Assert.fail("Can't clean up test root dir");
    }
  }
  localFs.mkdirs(TEST_DIR);
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * test {@code MapFile.Reader.finalKey()} method.
 *
 * Fix: the assertEquals arguments were in (actual, expected) order and
 * the local holding the read-back key was misleadingly named
 * {@code expectedKey}; both corrected (JUnit reports "expected X but
 * was Y" based on argument order).
 */
@Test public void testOnFinalKey(){
  final String TEST_METHOD_KEY="testOnFinalKey.mapfile";
  int SIZE=10;
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    writer=createWriter(TEST_METHOD_KEY,IntWritable.class,IntWritable.class);
    for (int i=0; i < SIZE; i++) {
      writer.append(new IntWritable(i),new IntWritable(i));
    }
    writer.close();
    reader=createReader(TEST_METHOD_KEY,IntWritable.class);
    // finalKey() overwrites its argument with the file's last key (9).
    IntWritable finalKey=new IntWritable(0);
    reader.finalKey(finalKey);
    assertEquals("testOnFinalKey not same !!!",new IntWritable(9),finalKey);
  }
  catch ( IOException ex) {
    fail("testOnFinalKey error !!!");
  }
  finally {
    IOUtils.cleanup(null,writer,reader);
  }
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * test {@code MapFile.rename()}
 * method with throwing {@code IOException}.
 *
 * Fix: the assertEquals arguments were in (actual, expected) order;
 * the expected ERROR_MESSAGE now comes first per JUnit convention.
 */
@Test public void testRenameWithException(){
  final String ERROR_MESSAGE="Can't rename file";
  final String NEW_FILE_NAME="test-new.mapfile";
  final String OLD_FILE_NAME="test-old.mapfile";
  MapFile.Writer writer=null;
  try {
    FileSystem fs=FileSystem.getLocal(conf);
    // Spy so rename() can be forced to throw without touching disk state.
    FileSystem spyFs=spy(fs);
    writer=createWriter(OLD_FILE_NAME,IntWritable.class,IntWritable.class);
    writer.close();
    Path oldDir=new Path(TEST_DIR,OLD_FILE_NAME);
    Path newDir=new Path(TEST_DIR,NEW_FILE_NAME);
    when(spyFs.rename(oldDir,newDir)).thenThrow(new IOException(ERROR_MESSAGE));
    MapFile.rename(spyFs,oldDir.toString(),newDir.toString());
    fail("testRenameWithException no exception error !!!");
  }
  catch ( IOException ex) {
    assertEquals("testRenameWithException invalid IOExceptionMessage !!!",ERROR_MESSAGE,ex.getMessage());
  }
  finally {
    IOUtils.cleanup(null,writer);
  }
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test getClosest feature against the deprecated MapFile.Writer/Reader
 * constructors. Keys are zero-padded strings "10","20",...,"90" so that
 * lexicographic Text ordering matches numeric ordering.
 * @throws Exception
 */
@Test @SuppressWarnings("deprecation") public void testGetClosest() throws Exception {
Path dirName=new Path(TEST_DIR,"testGetClosest.mapfile");
FileSystem fs=FileSystem.getLocal(conf);
Path qualifiedDirName=fs.makeQualified(dirName);
// Small index interval so getClosest actually exercises the index.
MapFile.Writer.setIndexInterval(conf,3);
MapFile.Writer writer=null;
MapFile.Reader reader=null;
try {
writer=new MapFile.Writer(conf,fs,qualifiedDirName.toString(),Text.class,Text.class);
assertEquals(3,writer.getIndexInterval());
final int FIRST_KEY=10;
for (int i=FIRST_KEY; i < 100; i+=10) {
String iStr=Integer.toString(i);
// Left-pad to two characters ("10".."90") for stable Text ordering.
Text t=new Text("00".substring(iStr.length()) + iStr);
writer.append(t,t);
}
writer.close();
reader=new MapFile.Reader(qualifiedDirName,conf);
Text key=new Text("55");
Text value=new Text();
// Forward: next key at or after "55" is "60".
Text closest=(Text)reader.getClosest(key,value);
assertEquals(new Text("60"),closest);
// Backward (before=true): closest key at or before "55" is "50".
closest=(Text)reader.getClosest(key,value,true);
assertEquals(new Text("50"),closest);
// Exact match returns the key itself in both directions.
final Text TWENTY=new Text("20");
closest=(Text)reader.getClosest(TWENTY,value);
assertEquals(TWENTY,closest);
closest=(Text)reader.getClosest(TWENTY,value,true);
assertEquals(TWENTY,closest);
// Before the first key: forward returns the first key, backward null.
key=new Text("00");
closest=(Text)reader.getClosest(key,value);
assertEquals(FIRST_KEY,Integer.parseInt(closest.toString()));
closest=(Text)reader.getClosest(key,value,true);
assertNull(closest);
// Past the last key: forward returns null, backward the last key.
key=new Text("99");
closest=(Text)reader.getClosest(key,value);
assertNull(closest);
closest=(Text)reader.getClosest(key,value,true);
assertEquals(new Text("90"),closest);
}
finally {
IOUtils.cleanup(null,writer,reader);
}
}
UtilityVerifierNullVerifierHybridVerifier
/**
* test {@code MapFile.Writer} constructor with key, value
* and validate it with {@code keyClass(), valueClass()} methods
*/
@Test public void testKeyValueClasses(){
Class extends WritableComparable>> keyClass=IntWritable.class;
Class> valueClass=Text.class;
try {
createWriter("testKeyValueClasses.mapfile",IntWritable.class,Text.class).close();
assertNotNull("writer key class null error !!!",MapFile.Writer.keyClass(keyClass));
assertNotNull("writer value class null error !!!",MapFile.Writer.valueClass(valueClass));
}
catch ( IOException ex) {
fail(ex.getMessage());
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierNullVerifierHybridVerifier
/**
 * test all available deprecated constructors for {@code MapFile.Writer}
 * and the deprecated {@code MapFile.Reader} constructor. Each writer is
 * created over the same path and closed immediately; only construction
 * is being exercised, not data round-trips.
 */
@Test @SuppressWarnings("deprecation") public void testDeprecatedConstructors(){
String path=new Path(TEST_DIR,"writes.mapfile").toString();
MapFile.Writer writer=null;
MapFile.Reader reader=null;
try {
FileSystem fs=FileSystem.getLocal(conf);
// (conf, fs, path, keyClass, valueClass, compressionType)
writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD);
assertNotNull(writer);
writer.close();
// ... plus a Progressable
writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD,defaultProgressable);
assertNotNull(writer);
writer.close();
// ... plus an explicit codec
writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD,defaultCodec,defaultProgressable);
assertNotNull(writer);
writer.close();
// Comparator-based variants
writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class);
assertNotNull(writer);
writer.close();
writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class,SequenceFile.CompressionType.RECORD);
assertNotNull(writer);
writer.close();
writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class,CompressionType.RECORD,defaultProgressable);
assertNotNull(writer);
writer.close();
// Deprecated Reader constructor; note the comparator is for IntWritable
// while the last writer used Text keys — only construction is checked.
reader=new MapFile.Reader(fs,path,WritableComparator.get(IntWritable.class),conf);
assertNotNull(reader);
assertNotNull("reader key is null !!!",reader.getKeyClass());
assertNotNull("reader value in null",reader.getValueClass());
}
catch ( IOException e) {
fail(e.getMessage());
}
finally {
IOUtils.cleanup(null,writer,reader);
}
}
APIUtilityVerifierIterativeVerifierUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * test {@code MapFile.Reader.next(key, value)} for iteration: run
 * several full passes over the file (resetting between passes), then
 * verify seek() succeeds for a present key and fails past the end.
 */
@Test public void testReaderKeyIteration(){
  final String TEST_METHOD_KEY="testReaderKeyIteration.mapfile";
  int SIZE=10;
  int ITERATIONS=5;
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    writer=createWriter(TEST_METHOD_KEY,IntWritable.class,Text.class);
    int start=0;
    for (int n=0; n < SIZE; n++) {
      writer.append(new IntWritable(n),new Text("Value:" + n));
    }
    writer.close();
    reader=createReader(TEST_METHOD_KEY,IntWritable.class);
    Writable startValue=new Text("Value:" + start);
    for (int pass=0; pass < ITERATIONS; pass++) {
      IntWritable key=new IntWritable(start);
      Writable value=startValue;
      // Drain the reader; next() fills key/value until EOF.
      while (reader.next(key,value)) {
        assertNotNull(key);
        assertNotNull(value);
      }
      reader.reset();
    }
    assertTrue("reader seek error !!!",reader.seek(new IntWritable(SIZE / 2)));
    assertFalse("reader seek error !!!",reader.seek(new IntWritable(SIZE * 2)));
  }
  catch ( IOException ex) {
    fail("reader seek error !!!");
  }
  finally {
    IOUtils.cleanup(null,writer,reader);
  }
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
 * test {@code MapFile.fix} method: write a map file, delete its index,
 * and check that fix() rebuilds the index reporting the original number
 * of entries.
 */
@Test public void testFix(){
final String INDEX_LESS_MAP_FILE="testFix.mapfile";
int PAIR_SIZE=20;
MapFile.Writer writer=null;
try {
FileSystem fs=FileSystem.getLocal(conf);
Path dir=new Path(TEST_DIR,INDEX_LESS_MAP_FILE);
writer=createWriter(INDEX_LESS_MAP_FILE,IntWritable.class,Text.class);
for (int i=0; i < PAIR_SIZE; i++) writer.append(new IntWritable(0),new Text("value"));
writer.close();
// NOTE(review): this resolves the index relative to "." with a "."
// prefix rather than under TEST_DIR — presumably matching how
// createWriter lays files out; confirm against createWriter.
File indexFile=new File(".","." + INDEX_LESS_MAP_FILE + "/index");
boolean isDeleted=false;
if (indexFile.exists()) isDeleted=indexFile.delete();
// Only meaningful if the index was actually found and deleted;
// fix() must then report PAIR_SIZE recovered entries.
if (isDeleted) assertTrue("testFix error !!!",MapFile.fix(fs,dir,IntWritable.class,Text.class,true,conf) == PAIR_SIZE);
}
catch ( Exception ex) {
fail("testFix error !!!");
}
finally {
IOUtils.cleanup(null,writer);
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * test {@code MapFile.rename()} when the underlying FileSystem.rename
 * returns false: MapFile.rename must convert that into an IOException
 * whose message starts with "Could not rename".
 *
 * Fix: the fail() message was copy-pasted from testRenameWithException;
 * it now names this test.
 */
@Test public void testRenameWithFalse(){
  final String ERROR_MESSAGE="Could not rename";
  final String NEW_FILE_NAME="test-new.mapfile";
  final String OLD_FILE_NAME="test-old.mapfile";
  MapFile.Writer writer=null;
  try {
    FileSystem fs=FileSystem.getLocal(conf);
    FileSystem spyFs=spy(fs);
    writer=createWriter(OLD_FILE_NAME,IntWritable.class,IntWritable.class);
    writer.close();
    Path oldDir=new Path(TEST_DIR,OLD_FILE_NAME);
    Path newDir=new Path(TEST_DIR,NEW_FILE_NAME);
    when(spyFs.rename(oldDir,newDir)).thenReturn(false);
    MapFile.rename(spyFs,oldDir.toString(),newDir.toString());
    fail("testRenameWithFalse no exception error !!!");
  }
  catch ( IOException ex) {
    assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!",ex.getMessage().startsWith(ERROR_MESSAGE));
  }
  finally {
    IOUtils.cleanup(null,writer);
  }
}
Class: org.apache.hadoop.io.TestSecureIOUtils
UtilityVerifierAssumptionSetterHybridVerifier
/**
 * With native libs available, all three forced secure-open variants must
 * reject a caller whose expected owner does not match the file's owner.
 *
 * Fix: corrected the typo "expection" -> "exception" in the three fail
 * messages.
 */
@Test(timeout=10000) public void testReadIncorrectlyRestrictedWithSecurity() throws IOException {
  assumeTrue(NativeIO.isAvailable());
  System.out.println("Running test with native libs...");
  String invalidUser="InvalidUser";
  try {
    SecureIOUtils.forceSecureOpenForRead(testFilePathIs,invalidUser,realGroup).close();
    fail("Didn't throw exception for wrong user ownership!");
  }
  catch ( IOException ioe) {
    // expected: ownership check must reject the invalid user
  }
  try {
    SecureIOUtils.forceSecureOpenFSDataInputStream(testFilePathFadis,invalidUser,realGroup).close();
    fail("Didn't throw exception for wrong user ownership!");
  }
  catch ( IOException ioe) {
    // expected
  }
  try {
    SecureIOUtils.forceSecureOpenForRandomRead(testFilePathRaf,"r",invalidUser,realGroup).close();
    fail("Didn't throw exception for wrong user ownership!");
  }
  catch ( IOException ioe) {
    // expected
  }
}
Class: org.apache.hadoop.io.TestSortedMapWritable
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * putAll must copy both the entries and the class-to-id bookkeeping
 * from the source map into the target.
 */
@Test(timeout=1000) public void testPutAll(){
  SortedMapWritable source=new SortedMapWritable();
  source.put(new Text("key"),new Text("value"));
  SortedMapWritable target=new SortedMapWritable();
  target.putAll(source);
  assertEquals("map1 entries don't match map2 entries",source,target);
  boolean copiedClassInfo=target.classToIdMap.containsKey(Text.class) && target.idToClassMap.containsValue(Text.class);
  assertTrue("map2 doesn't have class information from map1",copiedClassInfo);
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * End-to-end SortedMapWritable test: ordering of keys, the copy
 * constructor, and nesting (maps of maps) all preserve entries.
 */
@Test @SuppressWarnings("unchecked") public void testSortedMapWritable(){
Text[] keys={new Text("key1"),new Text("key2"),new Text("key3")};
BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes()),new BytesWritable("value3".getBytes())};
SortedMapWritable inMap=new SortedMapWritable();
for (int i=0; i < keys.length; i++) {
inMap.put(keys[i],values[i]);
}
// Sorted semantics: first/last key follow the keys' natural order.
assertEquals(0,inMap.firstKey().compareTo(keys[0]));
assertEquals(0,inMap.lastKey().compareTo(keys[2]));
// Copy constructor must reproduce every entry.
SortedMapWritable outMap=new SortedMapWritable(inMap);
assertEquals(inMap.size(),outMap.size());
for ( Map.Entry e : inMap.entrySet()) {
assertTrue(outMap.containsKey(e.getKey()));
assertEquals(0,((WritableComparable)outMap.get(e.getKey())).compareTo(e.getValue()));
}
// Nesting: a map whose values are themselves SortedMapWritables must
// also survive the copy constructor, entry by entry.
Text[] maps={new Text("map1"),new Text("map2")};
SortedMapWritable mapOfMaps=new SortedMapWritable();
mapOfMaps.put(maps[0],inMap);
mapOfMaps.put(maps[1],outMap);
SortedMapWritable copyOfMapOfMaps=new SortedMapWritable(mapOfMaps);
for (int i=0; i < maps.length; i++) {
assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
SortedMapWritable a=(SortedMapWritable)mapOfMaps.get(maps[i]);
SortedMapWritable b=(SortedMapWritable)copyOfMapOfMaps.get(maps[i]);
assertEquals(a.size(),b.size());
for ( Writable key : a.keySet()) {
assertTrue(b.containsKey(key));
WritableComparable aValue=(WritableComparable)a.get(key);
WritableComparable bValue=(WritableComparable)b.get(key);
assertEquals(0,aValue.compareTo(bValue));
}
}
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Tests that equals and hashCode uphold their contract across empty
 * maps, maps with different data, maps with the same entries inserted
 * in different order, and maps with swapped key/value pairings.
 */
@Test public void testEqualsAndHashCode(){
  String failureReason;
  SortedMapWritable mapA=new SortedMapWritable();
  SortedMapWritable mapB=new SortedMapWritable();
  failureReason="SortedMapWritable couldn't be initialized. Got null reference";
  assertNotNull(failureReason,mapA);
  assertNotNull(failureReason,mapB);
  // equals(null) must be false; two empty maps must be equal.
  assertFalse("equals method returns true when passed null",mapA.equals(null));
  assertTrue("Two empty SortedMapWritables are no longer equal",mapA.equals(mapB));
  Text[] keys={new Text("key1"),new Text("key2")};
  BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes())};
  // Different single entries: unequal both ways, different hashes.
  mapA.put(keys[0],values[0]);
  mapB.put(keys[1],values[1]);
  failureReason="Two SortedMapWritables with different data are now equal";
  assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
  assertFalse(failureReason,mapA.equals(mapB));
  assertFalse(failureReason,mapB.equals(mapA));
  // Same entry sets, built in opposite order: equal, same hash.
  mapA.put(keys[1],values[1]);
  mapB.put(keys[0],values[0]);
  failureReason="Two SortedMapWritables with same entry sets formed in different order are now different";
  assertEquals(failureReason,mapA.hashCode(),mapB.hashCode());
  assertTrue(failureReason,mapA.equals(mapB));
  assertTrue(failureReason,mapB.equals(mapA));
  // Cross the values over in mapA only: unequal again.
  mapA.put(keys[0],values[1]);
  mapA.put(keys[1],values[0]);
  failureReason="Two SortedMapWritables with different content are now equal";
  assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
  assertFalse(failureReason,mapA.equals(mapB));
  assertFalse(failureReason,mapB.equals(mapA));
}
Class: org.apache.hadoop.io.compress.TestCodec
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * With native zlib disabled, the codec factory must hand back a codec
 * whose built-in inflater can read a file written by java.util.zip's
 * GZIPOutputStream.
 *
 * Fix: the BufferedReader and the pooled Decompressor were leaked if
 * the content assertion failed; both are now released in finally, and
 * the writer is closed in finally as well.
 */
@Test public void testGzipCodecRead() throws IOException {
  // Force the non-native (built-in) zlib implementation.
  Configuration conf=new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,false);
  assertFalse("ZlibFactory is using native libs against request",ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor=ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!",zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Write a gzip file with the JDK, read it back through the codec.
  String tmpDir=System.getProperty("test.build.data","/tmp/");
  Path f=new Path(new Path(tmpDir),"testGzipCodecRead.txt.gz");
  final String msg="This is the message in the file!";
  BufferedWriter bw=new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(f.toString()))));
  try {
    bw.write(msg);
  }
  finally {
    bw.close();
  }
  CompressionCodecFactory ccf=new CompressionCodecFactory(conf);
  CompressionCodec codec=ccf.getCodec(f);
  Decompressor decompressor=CodecPool.getDecompressor(codec);
  FileSystem fs=FileSystem.getLocal(conf);
  InputStream is=fs.open(f);
  is=codec.createInputStream(is,decompressor);
  BufferedReader br=new BufferedReader(new InputStreamReader(is));
  try {
    String line=br.readLine();
    assertEquals("Didn't get the same message back!",msg,line);
  }
  finally {
    // Close the stream and return the pooled decompressor even on failure.
    br.close();
    CodecPool.returnDecompressor(decompressor);
  }
}
UtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * O_CREAT must produce a valid file descriptor for a new file, the fd
 * must become invalid after the stream owning it is closed, and a
 * second open with O_EXCL on the now-existing file must fail EEXIST.
 *
 * Fix: {@code assertNotNull(true)} was vacuous (asserting a boxed
 * boolean literal); it now asserts the returned FileDescriptor.
 */
@Test(timeout=30000) public void testOpenWithCreate() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  LOG.info("Test creating a file with O_CREAT");
  FileDescriptor fd=NativeIO.POSIX.open(new File(TEST_DIR,"testWorkingOpen").getAbsolutePath(),NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT,0700);
  assertNotNull(fd);
  assertTrue(fd.valid());
  FileOutputStream fos=new FileOutputStream(fd);
  fos.write("foo".getBytes());
  fos.close();
  // Closing the stream closes the underlying descriptor.
  assertFalse(fd.valid());
  LOG.info("Test exclusive create");
  try {
    fd=NativeIO.POSIX.open(new File(TEST_DIR,"testWorkingOpen").getAbsolutePath(),NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT | NativeIO.POSIX.O_EXCL,0700);
    fail("Was able to create existing file with O_EXCL");
  }
  catch ( NativeIOException nioe) {
    LOG.info("Got expected exception for failed exclusive create",nioe);
    assertEquals(Errno.EEXIST,nioe.getErrno());
  }
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test for races in fstat usage
 * NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
 * implementation of getpwuid_r.
 *
 * Fix: restored the generic type arguments that were stripped from
 * AtomicReference and List/ArrayList — with the raw types,
 * {@code new RuntimeException(thrown.get())} does not compile because
 * raw {@code get()} returns Object.
 */
@Test(timeout=30000) public void testMultiThreadedFstat() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  final FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
  // First failure observed by any statter thread; rethrown at the end.
  final AtomicReference<Throwable> thrown=new AtomicReference<Throwable>();
  List<Thread> statters=new ArrayList<Thread>();
  for (int i=0; i < 10; i++) {
    Thread statter=new Thread(){
      @Override public void run(){
        // Hammer getFstat on the shared fd for ~5 seconds.
        long et=Time.now() + 5000;
        while (Time.now() < et) {
          try {
            NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
            assertEquals(System.getProperty("user.name"),stat.getOwner());
            assertNotNull(stat.getGroup());
            assertTrue(!stat.getGroup().isEmpty());
            assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
          }
          catch ( Throwable t) {
            thrown.set(t);
          }
        }
      }
    }
    ;
    statters.add(statter);
    statter.start();
  }
  for ( Thread t : statters) {
    t.join();
  }
  fos.close();
  if (thrown.get() != null) {
    throw new RuntimeException(thrown.get());
  }
}
APIUtilityVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Write a known buffer to a file, mmap + mlock it, verify the mapped
 * contents sum to the same total as what was written, then munmap.
 * Requires the native library (assumption, not failure, otherwise).
 */
@Test(timeout=10000) public void testMlock() throws Exception {
assumeTrue(NativeIO.isAvailable());
final File TEST_FILE=new File(new File(System.getProperty("test.build.data","build/test/data")),"testMlockFile");
// Deliberately not page-aligned (12289 = 3 pages + 1 byte).
final int BUF_LEN=12289;
byte buf[]=new byte[BUF_LEN];
int bufSum=0;
for (int i=0; i < buf.length; i++) {
buf[i]=(byte)(i % 60);
bufSum+=buf[i];
}
FileOutputStream fos=new FileOutputStream(TEST_FILE);
try {
fos.write(buf);
// force() so the mapping below sees the data on disk.
fos.getChannel().force(true);
}
finally {
fos.close();
}
FileInputStream fis=null;
FileChannel channel=null;
try {
fis=new FileInputStream(TEST_FILE);
channel=fis.getChannel();
long fileSize=channel.size();
MappedByteBuffer mapbuf=channel.map(MapMode.READ_ONLY,0,fileSize);
// Lock the mapped region into memory, then read it back through the map.
NativeIO.POSIX.mlock(mapbuf,fileSize);
int sum=0;
for (int i=0; i < fileSize; i++) {
sum+=mapbuf.get(i);
}
assertEquals("Expected sums to be equal",bufSum,sum);
// munmap before closing the channel; mapbuf must not be used after this.
NativeIO.POSIX.munmap(mapbuf);
}
finally {
if (channel != null) {
channel.close();
}
if (fis != null) {
fis.close();
}
}
}
IterativeVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * Test that opens and closes a file 10000 times - this would crash with
 * "Too many open files" if we leaked fds using this access pattern.
 *
 * Fix: {@code assertNotNull(true)} was vacuous (asserting a boxed
 * boolean literal); it now asserts the returned FileDescriptor.
 */
@Test(timeout=30000) public void testFDDoesntLeak() throws IOException {
  if (Path.WINDOWS) {
    return;
  }
  for (int i=0; i < 10000; i++) {
    FileDescriptor fd=NativeIO.POSIX.open(new File(TEST_DIR,"testNoFdLeak").getAbsolutePath(),NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT,0700);
    assertNotNull(fd);
    assertTrue(fd.valid());
    FileOutputStream fos=new FileOutputStream(fd);
    fos.write("foo".getBytes());
    // Closing the stream releases the native descriptor each iteration.
    fos.close();
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * getFstat on a freshly created file must report the current user as
 * owner (or Administrators on Windows when the user is in that group),
 * a non-empty group, and a regular-file mode.
 */
@Test(timeout=30000) public void testFstat() throws Exception {
FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
fos.close();
LOG.info("Stat: " + String.valueOf(stat));
String owner=stat.getOwner();
String expectedOwner=System.getProperty("user.name");
if (Path.WINDOWS) {
// On Windows, files created by a member of Administrators are
// reported as owned by the Administrators group.
UserGroupInformation ugi=UserGroupInformation.createRemoteUser(expectedOwner);
final String adminsGroupString="Administrators";
if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
expectedOwner=adminsGroupString;
}
}
assertEquals(expectedOwner,owner);
assertNotNull(stat.getGroup());
assertTrue(!stat.getGroup().isEmpty());
// S_IFMT masks out permission bits, leaving only the file-type bits.
assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Opening a nonexistent path without O_CREAT must fail with ENOENT.
 */
@Test(timeout=30000) public void testOpenMissingWithoutCreate() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  LOG.info("Open a missing file without O_CREAT and it should fail");
  String missingPath=new File(TEST_DIR,"doesntexist").getAbsolutePath();
  try {
    NativeIO.POSIX.open(missingPath,NativeIO.POSIX.O_WRONLY,0700);
    fail("Able to open a new file without O_CREAT");
  }
  catch ( NativeIOException nioe) {
    LOG.info("Got expected exception",nioe);
    assertEquals(Errno.ENOENT,nioe.getErrno());
  }
}
APIUtilityVerifierUtilityVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * Calling a port with no server listening must fail with an IOException
 * that names the target address and carries the underlying cause.
 *
 * Fix: the Client was never stopped, leaking its connection threads;
 * it is now stopped in finally, consistent with the other IPC tests in
 * this class.
 */
@Test(timeout=60000) public void testStandAloneClient() throws IOException {
  Client client=new Client(LongWritable.class,conf);
  InetSocketAddress address=new InetSocketAddress("127.0.0.1",10);
  try {
    client.call(new LongWritable(RANDOM.nextLong()),address,null,null,0,conf);
    fail("Expected an exception to have been thrown");
  }
  catch ( IOException e) {
    String message=e.getMessage();
    String addressText=address.getHostName() + ":" + address.getPort();
    assertTrue("Did not find " + addressText + " in "+ message,message.contains(addressText));
    // The connect failure must be preserved as the cause and quoted
    // inside the top-level message.
    Throwable cause=e.getCause();
    assertNotNull("No nested exception in " + e,cause);
    String causeText=cause.getMessage();
    assertTrue("Did not find " + causeText + " in "+ message,message.contains(causeText));
  }
  finally {
    client.stop();
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test if the rpc server gets the retry count from client.
 *
 * Fix: the call to setCallIdAndRetryCount duplicated the magic literal
 * 255 instead of using the {@code retryCount} constant that the
 * server-side assertion checks; the constant is now used in both places.
 */
@Test(timeout=60000) public void testCallRetryCount() throws IOException {
  final int retryCount=255;
  final Client client=new Client(LongWritable.class,conf);
  Client.setCallIdAndRetryCount(Client.nextCallId(),retryCount);
  final TestServer server=new TestServer(1,false);
  // The server-side listener sees the retry count the client sent.
  server.callListener=new Runnable(){
    @Override public void run(){
      Assert.assertEquals(retryCount,Server.getCallRetryCount());
    }
  }
  ;
  try {
    InetSocketAddress addr=NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller=new SerialCaller(client,addr,10);
    caller.run();
    assertFalse(caller.failed);
  }
  finally {
    client.stop();
    server.stop();
  }
}
BooleanVerifierAssumptionSetterHybridVerifier
/**
 * Check that file descriptors aren't leaked by starting
 * and stopping IPC servers: after 50 start/stop cycles the open-fd
 * count must not have grown by 20 or more.
 */
@Test(timeout=60000) public void testSocketLeak() throws IOException {
  Assume.assumeTrue(FD_DIR.exists());
  long fdsBefore=countOpenFileDescriptors();
  int cycles=50;
  while (cycles-- > 0) {
    Server server=new TestServer(1,true);
    server.start();
    server.stop();
  }
  long fdsAfter=countOpenFileDescriptors();
  long leaked=fdsAfter - fdsBefore;
  assertTrue("Leaked " + leaked + " file descriptors",leaked < 20);
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the server's idle-connection reaper: with killMax=3, idle
 * client connections are culled in batches per scan interval until only
 * the one connection with an in-flight call remains; once that call
 * completes and idles out, the count drops to zero.
 *
 * Fix: in the finally block, {@code server.stop()} was inside the
 * for-loop over the client threads (a misplaced brace), so it was
 * invoked once per thread; it is now called exactly once after all
 * threads have been joined.
 */
@Test(timeout=30000) public void testConnectionIdleTimeouts() throws Exception {
  ((Log4JLogger)Server.LOG).getLogger().setLevel(Level.DEBUG);
  final int maxIdle=1000;
  final int cleanupInterval=maxIdle * 3 / 4;
  final int killMax=3;
  // One client stays busy (blocked on firstCallBarrier); the rest idle.
  final int clients=1 + killMax * 2;
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,maxIdle);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,0);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,killMax);
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,cleanupInterval);
  final CyclicBarrier firstCallBarrier=new CyclicBarrier(2);
  final CyclicBarrier callBarrier=new CyclicBarrier(clients);
  final CountDownLatch allCallLatch=new CountDownLatch(clients);
  final AtomicBoolean error=new AtomicBoolean();
  final TestServer server=new TestServer(clients,false);
  Thread[] threads=new Thread[clients];
  try {
    server.callListener=new Runnable(){
      AtomicBoolean first=new AtomicBoolean(true);
      @Override public void run(){
        try {
          allCallLatch.countDown();
          if (first.compareAndSet(true,false)) {
            // Exactly one handler parks here until the end of the test.
            firstCallBarrier.await();
          }
          else {
            callBarrier.await();
          }
        }
        catch ( Throwable t) {
          LOG.error(t);
          error.set(true);
        }
      }
    }
    ;
    server.start();
    final CountDownLatch callReturned=new CountDownLatch(clients - 1);
    final InetSocketAddress addr=NetUtils.getConnectAddress(server);
    final Configuration clientConf=new Configuration();
    clientConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,10000);
    for (int i=0; i < clients; i++) {
      threads[i]=new Thread(new Runnable(){
        @Override public void run(){
          Client client=new Client(LongWritable.class,clientConf);
          try {
            client.call(new LongWritable(Thread.currentThread().getId()),addr,null,null,0,clientConf);
            callReturned.countDown();
            // Keep the client-side connection open and idle.
            Thread.sleep(10000);
          }
          catch ( IOException e) {
            LOG.error(e);
          }
          catch ( InterruptedException e) {
          }
        }
      }
      );
      threads[i].start();
    }
    allCallLatch.await();
    assertFalse(error.get());
    assertEquals(clients,server.getNumOpenConnections());
    // Release the non-first calls; their connections become idle.
    callBarrier.await();
    callReturned.await();
    assertEquals(clients,server.getNumOpenConnections());
    // Each scan kills at most killMax idle connections.
    Thread.sleep(maxIdle * 2 - cleanupInterval);
    for (int i=clients; i > 1; i-=killMax) {
      Thread.sleep(cleanupInterval);
      assertFalse(error.get());
      assertEquals(i,server.getNumOpenConnections());
    }
    Thread.sleep(cleanupInterval);
    assertFalse(error.get());
    // Only the connection with the in-flight first call survives.
    assertEquals(1,server.getNumOpenConnections());
    // Let the first call finish; its connection should then idle out too.
    firstCallBarrier.await();
    Thread.sleep(maxIdle * 2);
    assertFalse(error.get());
    assertEquals(0,server.getNumOpenConnections());
  }
  finally {
    for ( Thread t : threads) {
      if (t != null) {
        t.interrupt();
        t.join();
      }
    }
    server.stop();
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test if
 * (1) the rpc server uses the call id/retry provided by the rpc client, and
 * (2) the rpc client receives the same call id/retry from the rpc server.
 */
@Test(timeout=60000) public void testCallIdAndRetry() throws IOException {
final CallInfo info=new CallInfo();
// Subclass the client to capture each call's id/retry as it is created,
// and to verify the server echoes the same values back in the response.
final Client client=new Client(LongWritable.class,conf){
@Override Call createCall( RpcKind rpcKind, Writable rpcRequest){
final Call call=super.createCall(rpcKind,rpcRequest);
info.id=call.id;
info.retry=call.retry;
return call;
}
@Override void checkResponse( RpcResponseHeaderProto header) throws IOException {
super.checkResponse(header);
Assert.assertEquals(info.id,header.getCallId());
Assert.assertEquals(info.retry,header.getRetryCount());
}
}
;
final TestServer server=new TestServer(1,false);
// Server-side: the handler must observe the captured id/retry.
server.callListener=new Runnable(){
@Override public void run(){
Assert.assertEquals(info.id,Server.getCallId());
Assert.assertEquals(info.retry,Server.getCallRetryCount());
}
}
;
try {
InetSocketAddress addr=NetUtils.getConnectAddress(server);
server.start();
// Serial calls so info holds the id/retry of the in-flight call.
final SerialCaller caller=new SerialCaller(client,addr,10);
caller.run();
assertFalse(caller.failed);
}
finally {
client.stop();
server.stop();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test if the rpc server gets the default retry count (0) from a client
 * that never set one explicitly.
 */
@Test(timeout=60000) public void testInitialCallRetryCount() throws IOException {
  final Client client=new Client(LongWritable.class,conf);
  final TestServer server=new TestServer(1,false);
  server.callListener=new Runnable(){
    @Override public void run(){
      // No retry count was ever set, so the server must see 0.
      Assert.assertEquals(0,Server.getCallRetryCount());
    }
  }
  ;
  try {
    InetSocketAddress serverAddr=NetUtils.getConnectAddress(server);
    server.start();
    SerialCaller serialCaller=new SerialCaller(client,serverAddr,10);
    serialCaller.run();
    assertFalse(serialCaller.failed);
  }
  finally {
    client.stop();
    server.stop();
  }
}
APIUtilityVerifierIterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests that client generates a unique sequential call ID for each RPC call,
 * even if multiple threads are using the same client.
 * @throws InterruptedException
 */
@Test(timeout=60000) public void testUniqueSequentialCallIds() throws IOException, InterruptedException {
int serverThreads=10, callerCount=100, perCallerCallCount=100;
TestServer server=new TestServer(serverThreads,false);
// Records the call id the server observes for every handled call.
// Synchronized list because multiple handler threads append concurrently.
final List callIds=Collections.synchronizedList(new ArrayList());
server.callListener=new Runnable(){
@Override public void run(){
callIds.add(Server.getCallId());
}
}
;
Client client=new Client(LongWritable.class,conf);
try {
InetSocketAddress addr=NetUtils.getConnectAddress(server);
server.start();
// Many caller threads share the single Client instance.
SerialCaller[] callers=new SerialCaller[callerCount];
for (int i=0; i < callerCount; ++i) {
callers[i]=new SerialCaller(client,addr,perCallerCallCount);
callers[i].start();
}
for (int i=0; i < callerCount; ++i) {
callers[i].join();
assertFalse(callers[i].failed);
}
}
finally {
client.stop();
server.stop();
}
// Every call must have produced exactly one id, and after sorting the ids
// must form a gap-free consecutive sequence from the first issued id.
int expectedCallCount=callerCount * perCallerCallCount;
assertEquals(expectedCallCount,callIds.size());
Collections.sort(callIds);
final int startID=callIds.get(0).intValue();
for (int i=0; i < expectedCallCount; ++i) {
assertEquals(startID + i,callIds.get(i).intValue());
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Test that, if the socket factory throws an IOE, it properly propagates
 * to the client.
 */
@Test(timeout=60000) public void testSocketFactoryException() throws IOException {
  // A factory whose createSocket() always fails with a recognizable message.
  final SocketFactory faultyFactory = mock(SocketFactory.class);
  doThrow(new IOException("Injected fault")).when(faultyFactory).createSocket();
  final Client rpcClient = new Client(LongWritable.class, conf, faultyFactory);
  final InetSocketAddress target = new InetSocketAddress("127.0.0.1", 10);
  try {
    rpcClient.call(new LongWritable(RANDOM.nextLong()), target, null, null, 0, conf);
    fail("Expected an exception to have been thrown");
  } catch (IOException e) {
    // The injected failure must surface unchanged to the caller.
    assertTrue(e.getMessage().contains("Injected fault"));
  }
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Test that, if a RuntimeException is thrown after creating a socket
 * but before successfully connecting to the IPC server, that the
 * failure is handled properly. This is a regression test for
 * HADOOP-7428.
 */
@Test(timeout=60000) public void testRTEDuringConnectionSetup() throws IOException {
// Spy factory: produces real sockets, but each one is rigged so that
// setSoTimeout() (invoked during connection setup) throws a RuntimeException.
SocketFactory spyFactory=spy(NetUtils.getDefaultSocketFactory(conf));
Mockito.doAnswer(new Answer(){
@Override public Socket answer( InvocationOnMock invocation) throws Throwable {
Socket s=spy((Socket)invocation.callRealMethod());
doThrow(new RuntimeException("Injected fault")).when(s).setSoTimeout(anyInt());
return s;
}
}
).when(spyFactory).createSocket();
Server server=new TestServer(1,true);
server.start();
try {
InetSocketAddress address=NetUtils.getConnectAddress(server);
Client client=new Client(LongWritable.class,conf,spyFactory);
// First call must fail with the injected fault propagated to the caller.
try {
client.call(new LongWritable(RANDOM.nextLong()),address,null,null,0,conf);
fail("Expected an exception to have been thrown");
}
catch ( Exception e) {
LOG.info("caught expected exception",e);
assertTrue(StringUtils.stringifyException(e).contains("Injected fault"));
}
// Remove the injected fault; a subsequent call through the same client
// must now succeed, proving the failed setup left no corrupted state.
Mockito.reset(spyFactory);
client.call(new LongWritable(RANDOM.nextLong()),address,null,null,0,conf);
}
finally {
server.stop();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that a custom IdentityProvider named in the configuration is
 * instantiated and returned by Configuration.getInstances().
 */
@Test public void testPluggableIdentityProvider(){
  final Configuration config = new Configuration();
  config.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
      "org.apache.hadoop.ipc.UserIdentityProvider");
  final List instances = config.getInstances(
      CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY, IdentityProvider.class);
  // Exactly one provider of exactly the configured class is expected.
  assertTrue(instances.size() == 1);
  final IdentityProvider provider = instances.get(0);
  assertNotNull(provider);
  assertEquals(provider.getClass(), UserIdentityProvider.class);
}
Class: org.apache.hadoop.ipc.TestProtoBufRpc
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the server's message-length limit: a small payload round-trips,
 * while an oversized (4KB) payload makes the call fail.
 */
@Test(timeout=6000) public void testExtraLongRpc() throws Exception {
  final TestRpcService2 client = getClient2();
  // A short message should echo back unchanged.
  final String small = StringUtils.repeat("X", 4);
  EchoRequestProto request = EchoRequestProto.newBuilder().setMessage(small).build();
  final EchoResponseProto response = client.echo2(null, request);
  Assert.assertEquals(small, response.getMessage());
  // An extra-long message must be rejected by the server.
  final String huge = StringUtils.repeat("X", 4096);
  request = EchoRequestProto.newBuilder().setMessage(huge).build();
  try {
    client.echo2(null, request);
    Assert.fail("expected extra-long RPC to fail");
  } catch (ServiceException se) {
    // expected: the oversized call is refused
  }
}
Class: org.apache.hadoop.ipc.TestRPC
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that server.stop() properly stops all threads
 */
@Test public void testStopsAllThreads() throws IOException, InterruptedException {
// Precondition: no leftover Reader threads from an earlier test.
int threadsBefore=countThreads("Server$Listener$Reader");
assertEquals("Expect no Reader threads running before test",0,threadsBefore);
final Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
server.start();
try {
// Poll in 10ms steps (up to ~5s) until at least one Reader thread appears.
int threadsRunning=0;
long totalSleepTime=0;
do {
totalSleepTime+=10;
Thread.sleep(10);
threadsRunning=countThreads("Server$Listener$Reader");
}
while (threadsRunning == 0 && totalSleepTime < 5000);
threadsRunning=countThreads("Server$Listener$Reader");
assertTrue(threadsRunning > 0);
}
finally {
server.stop();
}
// After stop() returns, every Reader thread must be gone.
int threadsAfter=countThreads("Server$Listener$Reader");
assertEquals("Expect no Reader threads left running after test",0,threadsAfter);
}
Class: org.apache.hadoop.ipc.TestRPCCompatibility
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
 * the server registry to extract protocol signatures and versions.
 */
@Test public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
TestImpl1 impl=new TestImpl1();
server=new RPC.Builder(conf).setProtocol(TestProtocol1.class).setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
// Register an additional protocol under the WRITABLE rpc kind.
server.addProtocol(RPC.RpcKind.RPC_WRITABLE,TestProtocol0.class,impl);
server.start();
ProtocolMetaInfoServerSideTranslatorPB xlator=new ProtocolMetaInfoServerSideTranslatorPB(server);
// Querying TestProtocol1 under the PROTOCOL_BUFFER kind yields no
// signatures (asserted below)...
GetProtocolSignatureResponseProto resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_PROTOCOL_BUFFER));
Assert.assertEquals(0,resp.getProtocolSignatureCount());
// ...while the WRITABLE kind yields exactly one, with the right version.
resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_WRITABLE));
Assert.assertEquals(1,resp.getProtocolSignatureCount());
ProtocolSignatureProto sig=resp.getProtocolSignatureList().get(0);
Assert.assertEquals(TestProtocol1.versionID,sig.getVersion());
// The returned signature must list the fingerprint of echo(String).
boolean found=false;
int expected=ProtocolSignature.getFingerprint(TestProtocol1.class.getMethod("echo",String.class));
for ( int m : sig.getMethodsList()) {
if (expected == m) {
found=true;
break;
}
}
Assert.assertTrue(found);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Checks the properties of ProtocolSignature.getFingerprint: different
 * parameter types, method names, or arities produce different fingerprints;
 * identical signatures (even on different protocols) and reordered method
 * arrays produce identical fingerprints.
 */
@Test public void testHashCode() throws Exception {
  final Method echoString = TestProtocol3.class.getMethod("echo", String.class);
  final Method echoInt = TestProtocol3.class.getMethod("echo", int.class);
  final int stringHash = ProtocolSignature.getFingerprint(echoString);
  final int intHash = ProtocolSignature.getFingerprint(echoInt);
  // Overloads with different parameter types must differ.
  assertFalse(stringHash == intHash);
  // The same signature on another protocol fingerprints identically.
  assertEquals(intHash,
      ProtocolSignature.getFingerprint(TestProtocol2.class.getMethod("echo", int.class)));
  assertFalse(stringHash ==
      ProtocolSignature.getFingerprint(TestProtocol2.class.getMethod("echo", String.class)));
  // A different method name changes the fingerprint.
  assertFalse(intHash ==
      ProtocolSignature.getFingerprint(TestProtocol3.class.getMethod("echo_alias", int.class)));
  // A different arity changes the fingerprint.
  assertFalse(intHash ==
      ProtocolSignature.getFingerprint(TestProtocol3.class.getMethod("echo", int.class, int.class)));
  // Aggregate fingerprint is independent of method order.
  final int orderA = ProtocolSignature.getFingerprint(new Method[]{echoInt, echoString});
  final int orderB = ProtocolSignature.getFingerprint(new Method[]{echoString, echoInt});
  assertEquals(orderA, orderB);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Tests that client connections are keyed by connection-relevant conf:
 * proxies created with the same max-idle-time share one cached connection,
 * while changing the value creates a second, distinct connection carrying
 * the new timeout.
 */
@Test public void testPerConnectionConf() throws Exception {
  TestTokenSecretManager sm = new TestTokenSecretManager();
  final Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class)
      .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
  server.start();
  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  // Arm the current UGI with a token so SASL connections can authenticate.
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current.getUserName()));
  Token token = new Token(tokenId, sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);
  Configuration newConf = new Configuration(conf);
  newConf.set(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
  Client client = null;
  TestSaslProtocol proxy1 = null;
  TestSaslProtocol proxy2 = null;
  TestSaslProtocol proxy3 = null;
  // Two distinct idle timeouts: same value => shared connection,
  // different value => new connection.
  int timeouts[] = {111222, 3333333};
  try {
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
    proxy1 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, newConf);
    proxy1.getAuthMethod();
    client = WritableRpcEngine.getClient(newConf);
    Set conns = client.getConnectionIds();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // A second proxy with identical conf must reuse the cached connection.
    proxy2 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, newConf);
    proxy2.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // Changing the idle timeout must create a second cached connection.
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
    proxy3 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, newConf);
    proxy3.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 2, conns.size());
    ConnectionId[] connsArray = {RPC.getConnectionIdForProxy(proxy1),
        RPC.getConnectionIdForProxy(proxy2), RPC.getConnectionIdForProxy(proxy3)};
    assertEquals(connsArray[0], connsArray[1]);
    assertEquals(connsArray[0].getMaxIdleTime(), timeouts[0]);
    assertFalse(connsArray[0].equals(connsArray[2]));
    // BUGFIX: was assertNotSame(..., timeouts[1]). assertNotSame autoboxes
    // both ints and compares object identity; for values outside the
    // Integer cache it passed vacuously and never checked anything. The
    // intended property is that the third connection carries the updated
    // idle timeout, i.e. value equality.
    assertEquals(connsArray[2].getMaxIdleTime(), timeouts[1]);
  } finally {
    server.stop();
    // Drop cached connection ids so later tests start from a clean cache.
    if (client != null) {
      client.getConnectionIds().clear();
    }
    if (proxy1 != null) RPC.stopProxy(proxy1);
    if (proxy2 != null) RPC.stopProxy(proxy2);
    if (proxy3 != null) RPC.stopProxy(proxy3);
  }
}
Class: org.apache.hadoop.ipc.TestSocketFactory
APIUtilityVerifierInternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that distinct SocketFactory implementations behave as distinct
 * hash-map keys: both can be cached simultaneously and each maps back to
 * its own value.
 */
@Test public void testSocketFactoryAsKeyInMap(){
  Map dummyCache = new HashMap();
  int toBeCached1 = 1;
  int toBeCached2 = 2;
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
  final SocketFactory dummySocketFactory = NetUtils.getDefaultSocketFactory(conf);
  dummyCache.put(dummySocketFactory, toBeCached1);
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.net.StandardSocketFactory");
  final SocketFactory defaultSocketFactory = NetUtils.getDefaultSocketFactory(conf);
  dummyCache.put(defaultSocketFactory, toBeCached2);
  // Both factories must coexist as keys and compare unequal.
  Assert.assertEquals("The cache contains two elements", 2, dummyCache.size());
  Assert.assertEquals("Equals of both socket factory shouldn't be same",
      defaultSocketFactory.equals(dummySocketFactory), false);
  // BUGFIX: was assertSame on an autoboxed int, which only passed because
  // small Integers are interned (JLS 5.1.7). The intended check is value
  // equality of the cached entry, so use assertEquals with explicit boxing.
  assertEquals(Integer.valueOf(toBeCached2), dummyCache.remove(defaultSocketFactory));
  dummyCache.put(defaultSocketFactory, toBeCached2);
  assertEquals(Integer.valueOf(toBeCached1), dummyCache.remove(dummySocketFactory));
}
APIUtilityVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
 * To test OS dependent setting of default execution path for a MapRed task.
 * Mainly that we can use MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV to set -
 * for WINDOWS: %HADOOP_COMMON_HOME%\bin is expected to be included in PATH - for
 * Linux: $HADOOP_COMMON_HOME/lib/native is expected to be included in
 * LD_LIBRARY_PATH
 */
@Test public void testMapRedExecutionEnv(){
// Part 1: env-string expansion must put the hadoop lib dir on the
// OS-appropriate library path variable.
try {
Map environment=new HashMap();
String setupHadoopHomeCommand=Shell.WINDOWS ? "HADOOP_COMMON_HOME=C:\\fake\\PATH\\to\\hadoop\\common\\home" : "HADOOP_COMMON_HOME=/fake/path/to/hadoop/common/home";
MRApps.setEnvFromInputString(environment,setupHadoopHomeCommand,conf);
MRApps.setEnvFromInputString(environment,conf.get(MRJobConfig.MAPRED_ADMIN_USER_ENV,MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV),conf);
String executionPaths=environment.get(Shell.WINDOWS ? "PATH" : "LD_LIBRARY_PATH");
String toFind=Shell.WINDOWS ? "C:\\fake\\PATH\\to\\hadoop\\common\\home\\bin" : "/fake/path/to/hadoop/common/home/lib/native";
assertTrue("execution path does not include the hadoop lib location " + toFind,executionPaths.contains(toFind));
}
catch ( Exception e) {
e.printStackTrace();
fail("Exception in testing execution environment for MapReduce task");
// NOTE(review): fail() throws AssertionError, so this tearDown() call is
// unreachable; if cleanup on failure is intended it must run before fail().
tearDown();
}
// Part 2: run a real job whose mapper checks the propagated environment.
try {
JobConf conf=new JobConf(mr.getConfig());
Path inDir=new Path("input");
Path outDir=new Path("output");
String input="The input";
configure(conf,inDir,outDir,input,ExecutionEnvCheckMapClass.class,IdentityReducer.class);
launchTest(conf,inDir,outDir,input);
}
catch ( Exception e) {
e.printStackTrace();
fail("Exception in testing propagation of env setting to child task");
// NOTE(review): unreachable after fail(), same as above.
tearDown();
}
}
InternalCallVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * Test that the GC counter actually increments when we know that we've
 * spent some time in the GC during the mapper.
 */
@Test public void testGcCounter() throws Exception {
  final Path in = getInputPath();
  final Path out = getOutputPath();
  final Configuration jobConf = new Configuration();
  final FileSystem localFs = FileSystem.getLocal(jobConf);
  // Start from a clean slate: remove leftovers from any previous run.
  for (Path stale : new Path[]{out, in}) {
    if (localFs.exists(stale)) {
      localFs.delete(stale, true);
    }
  }
  createInputFile(in, 0, 20);
  // Map-only job whose mapper deliberately churns the garbage collector.
  final Job job = Job.getInstance();
  job.setMapperClass(GCMapper.class);
  job.setNumReduceTasks(0);
  job.getConfiguration().set(MRJobConfig.IO_SORT_MB, "25");
  FileInputFormat.addInputPath(job, in);
  FileOutputFormat.setOutputPath(job, out);
  final boolean finished = job.waitForCompletion(true);
  assertTrue("job failed", finished);
  // The GC-heavy mapper must have recorded nonzero GC time.
  final Counter gcCounter = job.getCounters().findCounter(TaskCounter.GC_TIME_MILLIS);
  assertNotNull(gcCounter);
  assertTrue("No time spent in gc", gcCounter.getValue() > 0);
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Verifies TypeConverter.fromYarn(ApplicationReport, String): it must
 * tolerate a report without an ApplicationResourceUsageReport, and it must
 * map every report field onto the corresponding JobStatus field.
 */
@Test public void testFromYarnApplicationReport(){
  ApplicationId mockAppId = mock(ApplicationId.class);
  when(mockAppId.getClusterTimestamp()).thenReturn(12345L);
  when(mockAppId.getId()).thenReturn(6789);
  ApplicationReport mockReport = mock(ApplicationReport.class);
  when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url");
  when(mockReport.getApplicationId()).thenReturn(mockAppId);
  when(mockReport.getYarnApplicationState()).thenReturn(YarnApplicationState.KILLED);
  when(mockReport.getUser()).thenReturn("dummy-user");
  when(mockReport.getQueue()).thenReturn("dummy-queue");
  String jobFile = "dummy-path/job.xml";
  // A report without a usage report must not NPE (regression guard).
  try {
    TypeConverter.fromYarn(mockReport, jobFile);
  } catch (NullPointerException npe) {
    // BUGFIX: failure-message typo "converstion" corrected to "conversion".
    Assert.fail("Type conversion from YARN fails for jobs without "
        + "ApplicationUsageReport");
  }
  // Attach a usage report and verify every field is converted.
  ApplicationResourceUsageReport appUsageRpt = Records.newRecord(ApplicationResourceUsageReport.class);
  Resource r = Records.newRecord(Resource.class);
  r.setMemory(2048);
  appUsageRpt.setNeededResources(r);
  appUsageRpt.setNumReservedContainers(1);
  appUsageRpt.setNumUsedContainers(3);
  appUsageRpt.setReservedResources(r);
  appUsageRpt.setUsedResources(r);
  when(mockReport.getApplicationResourceUsageReport()).thenReturn(appUsageRpt);
  JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
  Assert.assertNotNull("fromYarn returned null status", status);
  Assert.assertEquals("jobFile set incorrectly", "dummy-path/job.xml", status.getJobFile());
  Assert.assertEquals("queue set incorrectly", "dummy-queue", status.getQueue());
  Assert.assertEquals("trackingUrl set incorrectly", "dummy-tracking-url", status.getTrackingUrl());
  Assert.assertEquals("user set incorrectly", "dummy-user", status.getUsername());
  Assert.assertEquals("schedulingInfo set incorrectly", "dummy-tracking-url", status.getSchedulingInfo());
  Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId());
  Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState());
  Assert.assertEquals("needed mem info set incorrectly", 2048, status.getNeededMem());
  Assert.assertEquals("num rsvd slots info set incorrectly", 1, status.getNumReservedSlots());
  Assert.assertEquals("num used slots info set incorrectly", 3, status.getNumUsedSlots());
  Assert.assertEquals("rsvd mem info set incorrectly", 2048, status.getReservedMem());
  Assert.assertEquals("used mem info set incorrectly", 2048, status.getUsedMem());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
@Test(timeout=10000) public void testFormat() throws IOException, InterruptedException {
// Generates several sequence files and verifies that the combine input
// format packs them into a single CombineFileSplit whose reader returns
// every record exactly once.
Job job=Job.getInstance(conf);
Random random=new Random();
long seed=random.nextLong();
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random,job);
TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
InputFormat format=new CombineSequenceFileInputFormat();
// Repeat the split/read cycle a few times.
for (int i=0; i < 3; i++) {
// NOTE(review): numSplits is only logged; it is never applied to the
// format, so getSplits() always sees the same configuration.
int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
LOG.info("splitting: requesting = " + numSplits);
List splits=format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
assertEquals("We got more than one splits!",1,splits.size());
InputSplit split=splits.get(0);
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// One bit per expected key: a set bit seen twice means a duplicate, and
// the final cardinality check catches missing keys.
BitSet bits=new BitSet(length);
RecordReader reader=format.createRecordReader(split,context);
MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split);
reader.initialize(split,mcontext);
assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass());
try {
while (reader.nextKeyValue()) {
IntWritable key=reader.getCurrentKey();
BytesWritable value=reader.getCurrentValue();
assertNotNull("Value should not be null.",value);
final int k=key.get();
LOG.debug("read " + k);
assertFalse("Key in multiple partitions.",bits.get(k));
bits.set(k);
}
}
finally {
reader.close();
}
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
@Test(timeout=10000) public void testFormat() throws Exception {
// Writes several text files, then verifies CombineTextInputFormat packs
// them into one split and that reading the split yields every record
// exactly once.
Job job=Job.getInstance(new Configuration(defaultConf));
Random random=new Random();
long seed=random.nextLong();
// Seed is logged so a failing run can be reproduced.
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random);
CombineTextInputFormat format=new CombineTextInputFormat();
// Repeat the split/read cycle a few times.
for (int i=0; i < 3; i++) {
// NOTE(review): numSplits is only logged; it is never applied to the
// format, so getSplits() always sees the same configuration.
int numSplits=random.nextInt(length / 20) + 1;
LOG.info("splitting: requesting = " + numSplits);
List splits=format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
assertEquals("We got more than one splits!",1,splits.size());
InputSplit split=splits.get(0);
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// One bit per expected value: detects duplicates and, via the final
// cardinality check, missing records.
BitSet bits=new BitSet(length);
LOG.debug("split= " + split);
TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader reader=format.createRecordReader(split,context);
assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass());
MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split);
reader.initialize(split,mcontext);
try {
int count=0;
while (reader.nextKeyValue()) {
LongWritable key=reader.getCurrentKey();
assertNotNull("Key should not be null.",key);
Text value=reader.getCurrentValue();
final int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.debug("split=" + split + " count="+ count);
}
finally {
reader.close();
}
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifierBranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test using the gzip codec for reading
 */
@Test(timeout=10000) public void testGzip() throws IOException, InterruptedException {
  final Configuration jobConf = new Configuration(defaultConf);
  final CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, jobConf);
  localFs.delete(workDir, true);
  // Two gzipped input files; combined, they should yield a single split.
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
      "the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "this is a test\nof gzip\n");
  final Job job = Job.getInstance(jobConf);
  FileInputFormat.setInputPaths(job, workDir);
  final CombineTextInputFormat format = new CombineTextInputFormat();
  final List splits = format.getSplits(job);
  assertEquals("compressed splits == 1", 1, splits.size());
  // Six lines from the first file plus two from the second.
  final List results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 8, results.size());
  final String[] firstList = {"the quick", "brown", "fox jumped", "over", " the lazy", " dog"};
  final String[] secondList = {"this is a test", "of gzip"};
  // File ordering within the combined split is not fixed; accept either.
  final String first = results.get(0).toString();
  if (first.equals(firstList[0])) {
    testResults(results, firstList, secondList);
  } else if (first.equals(secondList[0])) {
    testResults(results, secondList, firstList);
  } else {
    fail("unexpected first token!");
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test public void testBinaryTokenFile() throws IOException {
  final Configuration clusterConf = mrCluster.getConfig();
  final String nnUri = dfsCluster.getURI(0).toString();
  // The same NN listed twice: token handling must cope with duplicates.
  clusterConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
  final String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
  int exitCode = -1;
  try {
    exitCode = ToolRunner.run(clusterConf, new MySleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, exitCode);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=10000) public void testMemoryMerge() throws Exception {
// Buffer sized so that two OUTPUT_SIZE reservations fit in memory but a
// third crosses the merge threshold, forcing an in-memory merge.
final int TOTAL_MEM_BYTES=10000;
final int OUTPUT_SIZE=7950;
JobConf conf=new JobConf();
conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT,1.0f);
conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,TOTAL_MEM_BYTES);
conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT,0.8f);
conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT,0.9f);
TestExceptionReporter reporter=new TestExceptionReporter();
// Two-party barriers let the test rendezvous with the stubbed merge
// thread at the start and at the end of each merge pass.
CyclicBarrier mergeStart=new CyclicBarrier(2);
CyclicBarrier mergeComplete=new CyclicBarrier(2);
StubbedMergeManager mgr=new StubbedMergeManager(conf,reporter,mergeStart,mergeComplete);
// Round 1: two reservations succeed in memory...
MapOutput out1=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput));
InMemoryMapOutput mout1=(InMemoryMapOutput)out1;
fillOutput(mout1);
MapOutput out2=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput));
InMemoryMapOutput mout2=(InMemoryMapOutput)out2;
fillOutput(mout2);
// ...while a third is stalled (null) until a merge frees memory.
MapOutput out3=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertEquals("Should be told to wait",null,out3);
// Committing both outputs lets the first merge begin.
mout1.commit();
mout2.commit();
mergeStart.await();
Assert.assertEquals(1,mgr.getNumMerges());
// Round 2: repeat the pattern to prove merging keeps working.
out1=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput));
mout1=(InMemoryMapOutput)out1;
fillOutput(mout1);
out2=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput));
mout2=(InMemoryMapOutput)out2;
fillOutput(mout2);
out3=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertEquals("Should be told to wait",null,out3);
mout1.commit();
mout2.commit();
// Let merge 1 finish, wait for merge 2 to start, then let it finish.
mergeComplete.await();
mergeStart.await();
Assert.assertEquals(2,mgr.getNumMerges());
mergeComplete.await();
// Exactly two merges ran and the merge thread reported no exceptions.
Assert.assertEquals(2,mgr.getNumMerges());
Assert.assertEquals("exception reporter invoked",0,reporter.getNumExceptions());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@SuppressWarnings({"unchecked","deprecation"}) @Test(timeout=10000) public void testOnDiskMerger() throws IOException, URISyntaxException, InterruptedException {
// Verifies that the on-disk merger batches closed map-output files into
// groups of at most io.sort.factor, each batch ordered by compressed size.
JobConf jobConf=new JobConf();
final int SORT_FACTOR=5;
jobConf.setInt(MRJobConfig.IO_SORT_FACTOR,SORT_FACTOR);
MapOutputFile mapOutputFile=new MROutputFiles();
FileSystem fs=FileSystem.getLocal(jobConf);
MergeManagerImpl manager=new MergeManagerImpl(null,jobConf,fs,null,null,null,null,null,null,null,null,null,null,mapOutputFile);
// Reach into the manager for its private on-disk merge thread and verify
// it picked up the configured merge factor.
MergeThread,IntWritable,IntWritable> onDiskMerger=(MergeThread,IntWritable,IntWritable>)Whitebox.getInternalState(manager,"onDiskMerger");
int mergeFactor=(Integer)Whitebox.getInternalState(onDiskMerger,"mergeFactor");
assertEquals(mergeFactor,SORT_FACTOR);
// Suspend the thread so pending batches accumulate instead of merging.
onDiskMerger.suspend();
// Close 2x the factor worth of files, with random compressed sizes.
Random rand=new Random();
for (int i=0; i < 2 * SORT_FACTOR; ++i) {
Path path=new Path("somePath");
CompressAwarePath cap=new CompressAwarePath(path,1l,rand.nextInt());
manager.closeOnDiskFile(cap);
}
LinkedList> pendingToBeMerged=(LinkedList>)Whitebox.getInternalState(onDiskMerger,"pendingToBeMerged");
assertTrue("No inputs were added to list pending to merge",pendingToBeMerged.size() > 0);
// NOTE(review): the inner loop starts at j=1, so a single-element batch
// skips the size-bound assertion entirely -- confirm this is intended.
for (int i=0; i < pendingToBeMerged.size(); ++i) {
List inputs=pendingToBeMerged.get(i);
for (int j=1; j < inputs.size(); ++j) {
assertTrue("Not enough / too many inputs were going to be merged",inputs.size() > 0 && inputs.size() <= SORT_FACTOR);
assertTrue("Inputs to be merged were not sorted according to size: ",inputs.get(j).getCompressedSize() >= inputs.get(j - 1).getCompressedSize());
}
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * To ensure nothing broken after we removed normalization
 * from the MRAM side
 * @throws Exception
 */
@Test public void testJobWithNonNormalizedCapabilities() throws Exception {
  // Skip (rather than fail) when the MR app jar has not been built.
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final JobConf jobConf = new JobConf(mrCluster.getConfig());
  // Deliberately non-normalized memory requests for map and reduce.
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);
  final SleepJob sleeper = new SleepJob();
  sleeper.setConf(jobConf);
  final Job job = sleeper.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR);
  job.submit();
  final boolean done = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", done);
  Assert.assertEquals("Job should be finished successfully",
      JobStatus.State.SUCCEEDED, job.getJobState());
}
Class: org.apache.hadoop.mapreduce.v2.TestMRJobs
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=300000) public void testSleepJob() throws IOException, InterruptedException, ClassNotFoundException {
LOG.info("\n\n\nStarting testSleepJob().");
// Skip (rather than fail) when the MR app jar has not been built.
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
Configuration sleepConf=new Configuration(mrCluster.getConfig());
// Override the master address to "local" for this run.
sleepConf.set(MRConfig.MASTER_ADDRESS,"local");
SleepJob sleepJob=new SleepJob();
sleepJob.setConf(sleepConf);
// Reduce count is overridable via conf for test tuning; defaults to 2.
int numReduces=sleepConf.getInt("TestMRJobs.testSleepJob.reduces",2);
Job job=sleepJob.createJob(3,numReduces,10000,1,5000,1);
job.addFileToClassPath(APP_JAR);
job.setJarByClass(SleepJob.class);
// Fail fast: any map attempt failure fails the job immediately.
job.setMaxMapAttempts(1);
job.submit();
String trackingUrl=job.getTrackingURL();
String jobId=job.getJobID().toString();
boolean succeeded=job.waitForCompletion(true);
Assert.assertTrue(succeeded);
Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState());
// The tracking URL must end with the job id's numeric suffix.
Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID "+ jobId,trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
verifySleepJobCounters(job);
verifyTaskProgress(job);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=60000) public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException {
LOG.info("\n\n\nStarting testRandomWriter().");
// Skip (rather than fail) when the MR app jar has not been built.
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
RandomTextWriterJob randomWriterJob=new RandomTextWriterJob();
// 3072 total bytes at 1024 bytes/map -- presumably 3 map tasks, matching
// the 3 part files asserted below.
mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES,"3072");
mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP,"1024");
Job job=randomWriterJob.createJob(mrCluster.getConfig());
Path outputDir=new Path(OUTPUT_ROOT_DIR,"random-output");
FileOutputFormat.setOutputPath(job,outputDir);
// Speculation off so no extra attempts create additional output files.
job.setSpeculativeExecution(false);
job.addFileToClassPath(APP_JAR);
job.setJarByClass(RandomTextWriterJob.class);
job.setMaxMapAttempts(1);
job.submit();
String trackingUrl=job.getTrackingURL();
String jobId=job.getJobID().toString();
boolean succeeded=job.waitForCompletion(true);
Assert.assertTrue(succeeded);
Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState());
// The tracking URL must end with the job id's numeric suffix.
Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID "+ jobId,trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
// Count output part files, excluding the success marker.
RemoteIterator iterator=FileContext.getFileContext(mrCluster.getConfig()).listStatus(outputDir);
int count=0;
while (iterator.hasNext()) {
FileStatus file=iterator.next();
if (!file.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
count++;
}
}
Assert.assertEquals("Number of part files is wrong!",3,count);
verifyRandomWriterCounters(job);
}
IterativeVerifierBranchVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Verifies per-container log rolling: with TASK_USERLOG_LIMIT / MR_AM_LOG_KB
 * and *_LOG_BACKUPS configured, each container must end up with
 * (backups + 1) syslog* files, and the first rolled file (syslog.1) must be
 * at least the configured size.
 */
@Test(timeout=120000) public void testContainerRollingLog() throws IOException, InterruptedException, ClassNotFoundException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
final SleepJob sleepJob=new SleepJob();
final JobConf sleepConf=new JobConf(mrCluster.getConfig());
// Log at ALL so the small size limits below are actually exceeded.
sleepConf.set(MRJobConfig.MAP_LOG_LEVEL,Level.ALL.toString());
final long userLogKb=4;
sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT,userLogKb);
sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS,3);
sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL,Level.ALL.toString());
final long amLogKb=7;
sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB,amLogKb);
sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS,7);
sleepJob.setConf(sleepConf);
final Job job=sleepJob.createJob(1,0,1L,100,0L,0);
job.setJarByClass(SleepJob.class);
job.addFileToClassPath(APP_JAR);
job.waitForCompletion(true);
final JobId jobId=TypeConverter.toYarn(job.getJobID());
final ApplicationId appID=jobId.getAppId();
// Poll once a second until the RM reports a terminal app state, or give
// up (with a warning) after 60 seconds.
int pollElapsed=0;
while (true) {
Thread.sleep(1000);
pollElapsed+=1000;
if (TERMINAL_RM_APP_STATES.contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
break;
}
if (pollElapsed >= 60000) {
LOG.warn("application did not reach terminal state within 60 seconds");
break;
}
}
Assert.assertEquals(RMAppState.FINISHED,mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());
// Build a glob matching every container's syslog under the app's log dir.
final String appIdStr=appID.toString();
final String appIdSuffix=appIdStr.substring("application_".length(),appIdStr.length());
final String containerGlob="container_" + appIdSuffix + "_*_*";
final String syslogGlob=appIdStr + Path.SEPARATOR + containerGlob+ Path.SEPARATOR+ TaskLog.LogName.SYSLOG;
int numAppMasters=0;
int numMapTasks=0;
for (int i=0; i < NUM_NODE_MGRS; i++) {
final Configuration nmConf=mrCluster.getNodeManager(i).getConfig();
for ( String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
final Path absSyslogGlob=new Path(logDir + Path.SEPARATOR + syslogGlob);
LOG.info("Checking for glob: " + absSyslogGlob);
final FileStatus[] syslogs=localFs.globStatus(absSyslogGlob);
for ( FileStatus slog : syslogs) {
boolean foundAppMaster=job.isUber();
final Path containerPathComponent=slog.getPath().getParent();
if (!foundAppMaster) {
// Container id 1 of an app attempt is the AM container.
final ContainerId cid=ConverterUtils.toContainerId(containerPathComponent.getName());
foundAppMaster=(cid.getId() == 1);
}
final FileStatus[] sysSiblings=localFs.globStatus(new Path(containerPathComponent,TaskLog.LogName.SYSLOG + "*"));
// Sort so the first rolled file (syslog.1) lands at index 1.
Arrays.sort(sysSiblings);
if (foundAppMaster) {
numAppMasters++;
}
else {
numMapTasks++;
}
// BUGFIX: was Assert.assertSame, which compares autoboxed Integer
// identity and only "works" for values in the Integer cache
// (-128..127). assertEquals compares the values themselves.
if (foundAppMaster) {
Assert.assertEquals("Unexpected number of AM syslog* files",sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS,0) + 1,sysSiblings.length);
Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb,sysSiblings[1].getLen() >= amLogKb * 1024);
}
else {
Assert.assertEquals("Unexpected number of MR task syslog* files",sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS,0) + 1,sysSiblings.length);
Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb,sysSiblings[1].getLen() >= userLogKb * 1024);
}
}
}
}
// Exactly one AM; map-task logs only exist when the job is not uberized.
Assert.assertEquals("No AppMaster log found!",1,numAppMasters);
if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false)) {
Assert.assertEquals("MapTask log with uber found!",0,numMapTasks);
}
else {
Assert.assertEquals("No MapTask log found!",1,numMapTasks);
}
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises job-end notification retry behaviour against an unreachable
 * URL: with max attempts capped at 1 exactly one notification try is made
 * and the 5s retry interval is honoured; with 3 attempts and a 3s interval
 * there are 3 tries taking more than 9s in total.
 * NOTE(review): relies on wall-clock timing and on "http://nonexistent"
 * failing to resolve — may be flaky on unusual DNS setups; confirm.
 */
@Test public void testNotifyRetries() throws InterruptedException {
JobConf conf=new JobConf();
// Round 1: retry attempts 0, notification attempts 1 => exactly one try.
conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS,"0");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS,"1");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL,"http://nonexistent");
conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL,"5000");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL,"5000");
JobReport jobReport=mock(JobReport.class);
long startTime=System.currentTimeMillis();
// notificationCount is incremented by the notifier under test.
this.notificationCount=0;
this.setConf(conf);
this.notify(jobReport);
long endTime=System.currentTimeMillis();
Assert.assertEquals("Only 1 try was expected but was : " + this.notificationCount,1,this.notificationCount);
// The single try still waits out the 5s retry interval.
Assert.assertTrue("Should have taken more than 5 seconds it took " + (endTime - startTime),endTime - startTime > 5000);
// Round 2: 3 attempts spaced 3s apart => 3 tries, > 9s total.
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS,"3");
conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS,"3");
conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL,"3000");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL,"3000");
startTime=System.currentTimeMillis();
this.notificationCount=0;
this.setConf(conf);
this.notify(jobReport);
endTime=System.currentTimeMillis();
Assert.assertEquals("Only 3 retries were expected but was : " + this.notificationCount,3,this.notificationCount);
Assert.assertTrue("Should have taken more than 9 seconds it took " + (endTime - startTime),endTime - startTime > 9000);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * AM with 2 maps and 1 reduce. A speculative attempt is added for the 1st
 * map while its original attempt is still running; the original attempt
 * then succeeds. The AM is stopped after that first task finishes and a
 * second AM is started with recovery enabled: it must recover the
 * completed map — including the job/task start and finish times — and run
 * the rest of the job to completion.
 * (The previous javadoc here described testCrashed, not this test.)
 * @throws Exception
 */
@Test public void testSpeculative() throws Exception {
int runCount=0;
long am1StartTimeEst=System.currentTimeMillis();
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
// FIX: restore the generic type lost in this file; a raw Iterator yields
// Object and does not compile when assigned to Task.
Iterator<Task> it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
// Ask the AM to add a speculative attempt for map 1.
app.getContext().getEventHandler().handle(new TaskEvent(mapTask1.getID(),TaskEventType.T_ADD_SPEC_ATTEMPT));
int timeOut=0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(1000);
LOG.info("Waiting for next attempt to start");
}
Iterator<TaskAttempt> t1it=mapTask1.getAttempts().values().iterator();
TaskAttempt task1Attempt1=t1it.next();
TaskAttempt task1Attempt2=t1it.next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
ContainerId t1a2contId=task1Attempt2.getAssignedContainerID();
LOG.info(t1a2contId.toString());
LOG.info(task1Attempt1.getID().toString());
LOG.info(task1Attempt2.getID().toString());
// Simulate the speculative attempt's container launch.
app.getContext().getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(),runCount));
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task1Attempt2,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Finish the original attempt; the task succeeds despite the speculation.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(task1Attempt1,TaskAttemptState.SUCCEEDED);
app.waitForState(mapTask1,TaskState.SUCCEEDED);
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
// Kill the first AM and start a second generation with recovery on.
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// Map 1 must come back already SUCCEEDED from recovery.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Recovery must preserve the original job and task timestamps.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
// Each AM's reported start time must fall inside its estimated window.
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * AM with 2 maps and 1 reduce. For 1st map, one attempt fails, one attempt
 * completely disappears because of failed launch, one attempt gets killed and
 * one attempt succeeds. AM crashes after the first tasks finishes and
 * recovers completely and succeeds in the second generation.
 * @throws Exception
 */
@Test public void testCrashed() throws Exception {
int runCount=0;
long am1StartTimeEst=System.currentTimeMillis();
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
// FIX: restore the generic type lost in this file; a raw Iterator yields
// Object and does not compile when assigned to Task.
Iterator<Task> it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Attempt 1 of map 1: plain failure.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_FAILMSG));
app.waitForState(task1Attempt1,TaskAttemptState.FAILED);
int timeOut=0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(2,mapTask1.getAttempts().size());
// FIX: likewise restore Iterator<TaskAttempt> (raw would not compile).
Iterator<TaskAttempt> itr=mapTask1.getAttempts().values().iterator();
itr.next();
TaskAttempt task1Attempt2=itr.next();
// Attempt 2 of map 1: container launch fails, so the attempt vanishes.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt2.getID(),TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
app.waitForState(task1Attempt2,TaskAttemptState.FAILED);
timeOut=0;
while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(3,mapTask1.getAttempts().size());
itr=mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
TaskAttempt task1Attempt3=itr.next();
app.waitForState(task1Attempt3,TaskAttemptState.RUNNING);
// Attempt 3 of map 1: killed.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt3.getID(),TaskAttemptEventType.TA_KILL));
app.waitForState(task1Attempt3,TaskAttemptState.KILLED);
timeOut=0;
while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(4,mapTask1.getAttempts().size());
itr=mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
itr.next();
TaskAttempt task1Attempt4=itr.next();
app.waitForState(task1Attempt4,TaskAttemptState.RUNNING);
// Attempt 4 of map 1: finally succeeds.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt4.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
// Crash the first AM and start a second generation with recovery on.
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// Map 1 must come back already SUCCEEDED from recovery.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Recovery must preserve the original job and task timestamps.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
// Each AM's reported start time must fall inside its estimated window.
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the exact task JVM command line built for a cross-platform
 * submission, plus the default HADOOP_ROOT_LOGGER / HADOOP_CLIENT_OPTS
 * values placed in the container environment.
 */
@Test(timeout=30000) public void testCommandLine() throws Exception {
MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Full expected command line, using platform-neutral variable expansion.
Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java"+ " -Djava.net.preferIPv4Stack=true"+ " -Dhadoop.metrics.log.level=WARN"+ " -Xmx200m -Djava.io.tmpdir="+ MRApps.crossPlatformify("PWD")+ "/tmp"+ " -Dlog4j.configuration=container-log4j.properties"+ " -Dyarn.app.container.log.dir="+ " -Dyarn.app.container.log.filesize=0"+ " -Dhadoop.root.logger=INFO,CLA"+ " org.apache.hadoop.mapred.YarnChild 127.0.0.1"+ " 54321"+ " attempt_0_0000_m_000000_0"+ " 0"+ " 1>/stdout"+ " 2>/stderr ]",app.myCommandLine);
// Defaults when the job does not override the environment.
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("INFO,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies environment propagation to the task container: a user-supplied
 * HADOOP_CLIENT_OPTS overrides the default, the map log level flows into
 * HADOOP_ROOT_LOGGER, and an explicit HADOOP_ROOT_LOGGER in the task env
 * wins over the derived value.
 */
@Test public void testEnvironmentVariables() throws Exception {
MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration conf=new Configuration();
// User-supplied env var should override the default empty value.
conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_CLIENT_OPTS=test");
conf.setStrings(MRJobConfig.MAP_LOG_LEVEL,"WARN");
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// MAP_LOG_LEVEL=WARN must surface as HADOOP_ROOT_LOGGER=WARN,console.
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("WARN,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("test",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
// Second run: an explicit HADOOP_ROOT_LOGGER in the task env wins.
app=new MyMRApp(1,0,true,this.getClass().getName(),true);
conf=new Configuration();
conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_ROOT_LOGGER=trace");
job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("trace",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Invoking the 'singleTaskCounter' action must select SingleCounterPage as
 * the rendering class and populate both the counter-name and counter-group
 * properties on the controller.
 */
@Test public void testGetSingleTaskCounter() throws IOException {
// Trigger the controller action under test.
appController.singleTaskCounter();
// The controller must render through SingleCounterPage.
assertEquals(SingleCounterPage.class,appController.getClazz());
// Both counter properties must have been set by the action.
assertNotNull(appController.getProperty().get(AppController.COUNTER_NAME));
assertNotNull(appController.getProperty().get(AppController.COUNTER_GROUP));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Ensures the -refreshUserToGroupsMappings admin command forces the Groups
 * service to drop its cache: two lookups before the refresh must agree
 * element-wise, while lookups after the refresh must differ element-wise
 * from the originals.
 */
@Test public void testRefreshUserToGroupsMappings() throws Exception {
String[] args=new String[]{"-refreshUserToGroupsMappings"};
Groups groups=Groups.getUserToGroupsMappingService(conf);
String user=UserGroupInformation.getCurrentUser().getUserName();
System.out.println("first attempt:");
// FIX: restore List<String> (generics were lost) and stop sharing one
// String[] across lists — toArray(T[]) returns a NEW array when the
// argument is too small, and the original discarded that return value,
// so a larger g2/g3 would have printed stale g1 contents.
List<String> g1=groups.getGroups(user);
System.out.println(Arrays.toString(g1.toArray(new String[g1.size()])));
System.out.println("second attempt, should be same:");
List<String> g2=groups.getGroups(user);
System.out.println(Arrays.toString(g2.toArray(new String[g2.size()])));
for (int i=0; i < g2.size(); i++) {
assertEquals("Should be same group ",g1.get(i),g2.get(i));
}
// Issue the refresh; the harness swaps the mapping underneath.
hsAdminClient.run(args);
System.out.println("third attempt(after refresh command), should be different:");
List<String> g3=groups.getGroups(user);
System.out.println(Arrays.toString(g3.toArray(new String[g3.size()])));
for (int i=0; i < g3.size(); i++) {
assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
}
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * A history file name in the legacy (pre-queue-name) format must still be
 * parsed correctly: every field decodes to its expected value and the
 * queue name — absent from the old format — comes back null.
 */
@Test public void testJobHistoryFileNameBackwardsCompatible() throws IOException {
// Expected values, derived from the same constants used to build the name.
JobID legacyJobId=JobID.forName(JOB_ID);
JobId expectedJobId=TypeConverter.toYarn(legacyJobId);
long expectedSubmitTime=Long.parseLong(SUBMIT_TIME);
long expectedFinishTime=Long.parseLong(FINISH_TIME);
int expectedMaps=Integer.parseInt(NUM_MAPS);
int expectedReduces=Integer.parseInt(NUM_REDUCES);
// Build a file name in the old format and decode it.
String legacyFileName=String.format(OLD_JOB_HISTORY_FILE_FORMATTER,JOB_ID,SUBMIT_TIME,USER_NAME,JOB_NAME,FINISH_TIME,NUM_MAPS,NUM_REDUCES,JOB_STATUS);
JobIndexInfo decoded=FileNameIndexUtils.getIndexInfo(legacyFileName);
Assert.assertEquals("Job id incorrect after decoding old history file",expectedJobId,decoded.getJobId());
Assert.assertEquals("Submit time incorrect after decoding old history file",expectedSubmitTime,decoded.getSubmitTime());
Assert.assertEquals("User incorrect after decoding old history file",USER_NAME,decoded.getUser());
Assert.assertEquals("Job name incorrect after decoding old history file",JOB_NAME,decoded.getJobName());
Assert.assertEquals("Finish time incorrect after decoding old history file",expectedFinishTime,decoded.getFinishTime());
Assert.assertEquals("Num maps incorrect after decoding old history file",expectedMaps,decoded.getNumMaps());
Assert.assertEquals("Num reduces incorrect after decoding old history file",expectedReduces,decoded.getNumReduces());
Assert.assertEquals("Job status incorrect after decoding old history file",JOB_STATUS,decoded.getJobStatus());
// The legacy format carries no queue name.
Assert.assertNull("Queue name incorrect after decoding old history file",decoded.getQueueName());
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * MRApps.getJobFile must build the per-user staging path
 * ({staging-dir}/{user}/.staging/{job-id}/job.xml) for the given job.
 */
@Test(timeout=120000) public void testGetJobFileWithUser(){
Configuration stagingConf=new Configuration();
stagingConf.set(MRJobConfig.MR_AM_STAGING_DIR,"/my/path/to/staging");
String jobFile=MRApps.getJobFile(stagingConf,"dummy-user",new JobID("dummy-job",12345));
assertNotNull("getJobFile results in null.",jobFile);
assertEquals("jobFile with specified user is not as expected.","/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml",jobFile);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * With MAPREDUCE_JOB_CLASSLOADER enabled, the job jar and PWD must be kept
 * out of the system CLASSPATH and placed on APP_CLASSPATH instead.
 */
@Test(timeout=120000) public void testSetClasspathWithJobClassloader() throws IOException {
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER,true);
// FIX: restore Map<String, String> (generics were lost) — with a raw Map,
// env.get(...) returns Object and the String assignments below would not
// compile.
Map<String, String> env=new HashMap<String, String>();
MRApps.setClasspath(env,conf);
String cp=env.get("CLASSPATH");
String appCp=env.get("APP_CLASSPATH");
// The job jar must not leak onto the system classpath...
assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the" + " classpath!",cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",cp.contains("PWD"));
// ...but must appear, with its nested entries, on the app classpath.
String expectedAppClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),"job.jar/job.jar","job.jar/classes/","job.jar/lib/*",ApplicationConstants.Environment.PWD.$$() + "/*"));
assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app" + " classpath!",expectedAppClasspath,appCp);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * A sink that hangs must not wedge the metrics system: the publish is
 * dropped (droppedPubAll == 1), the hanging thread is only interrupted at
 * stop/shutdown, and the sink is still invoked for subsequent records
 * after its first hang.
 */
@Test public void testHangingSink(){
// retry count 0 => a hanging sink gets no retries and the record is dropped.
new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.sink.hanging.retry.delay","1").add("test.sink.hanging.retry.backoff","1.01").add("test.sink.hanging.retry.count","0").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms=new MetricsSystemImpl("Test");
ms.start();
TestSource s=ms.register("s3","s3 desc",new TestSource("s3rec"));
s.c1.incr();
HangingSink hanging=new HangingSink();
ms.registerSink("hanging","Hang the sink!",hanging);
ms.publishMetricsNow();
// Exactly one publish was dropped because the sink hung.
assertEquals(1L,ms.droppedPubAll.value());
// The hanging sink must not be interrupted while the system is running...
assertFalse(hanging.getInterrupted());
ms.stop();
ms.shutdown();
// ...but must be interrupted by shutdown, and must have been called again
// for records after the first hang.
assertTrue(hanging.getInterrupted());
assertTrue("The sink didn't get called after its first hang " + "for subsequent records.",hanging.getGotCalledSecondTime());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Concurrent consumer access is illegal: while a sleeping consumer holds
 * the queue, every other consuming/clearing call must throw
 * ConcurrentModificationException, and the queue contents must be left
 * intact afterwards.
 * @throws Exception
 */
@Test public void testConcurrentConsumers() throws Exception {
// Queue of capacity 2 whose consumer thread is asleep holding element 1.
final SinkQueue q=newSleepingConsumerQueue(2,1);
assertTrue("should enqueue",q.enqueue(2));
assertEquals("queue back",2,(int)q.back());
// Queue is full (capacity 2) => non-blocking enqueue drops.
assertTrue("should drop",!q.enqueue(3));
// Each of clear/consume/consumeAll/dequeue must throw CME while the
// sleeping consumer is active (shouldThrowCME asserts that).
shouldThrowCME(new Fun(){
@Override public void run(){
q.clear();
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.consume(null);
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.consumeAll(null);
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.dequeue();
}
}
);
// The failed calls must not have disturbed the queue contents.
assertEquals("queue size",2,q.size());
assertEquals("queue front",1,(int)q.front());
assertEquals("queue back",2,(int)q.back());
}
InternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test the consumer throwing exceptions: the exact exception instance must
 * propagate to the caller, and the element must remain in the queue (the
 * failed consume must not dequeue it).
 * @throws Exception
 */
@Test public void testConsumerException() throws Exception {
// FIX: restore the generic parameters lost in this file — the anonymous
// consume(Integer) below does not override raw Consumer's erased method.
final SinkQueue<Integer> q=new SinkQueue<Integer>(1);
final RuntimeException ex=new RuntimeException("expected");
q.enqueue(1);
try {
q.consume(new Consumer<Integer>(){
@Override public void consume( Integer e){
throw ex;
}
}
);
// BUGFIX: without this the test silently passes if consume swallows the
// exception. fail() throws AssertionError (an Error), so it is not
// caught by the Exception handler below.
fail("should have thrown");
}
catch ( Exception expected) {
// Must be the very same instance, not a wrapped copy.
assertSame("consumer exception",ex,expected);
}
// The failed consume must have left the element in place.
assertEquals("queue size",1,q.size());
assertEquals("element",1,(int)q.dequeue());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test common use case: enqueue/front/back/dequeue/consume round-trips,
 * ending with an empty queue whose front and back are null.
 * @throws Exception
 */
@Test public void testCommon() throws Exception {
// FIX: restore the generic parameters lost in this file — the anonymous
// consume(Integer) below does not override raw Consumer's erased method.
final SinkQueue<Integer> q=new SinkQueue<Integer>(2);
q.enqueue(1);
assertEquals("queue front",1,(int)q.front());
assertEquals("queue back",1,(int)q.back());
assertEquals("element",1,(int)q.dequeue());
assertTrue("should enqueue",q.enqueue(2));
// consume hands the head element to the consumer and removes it.
q.consume(new Consumer<Integer>(){
@Override public void consume( Integer e){
assertEquals("element",2,(int)e);
}
}
);
assertTrue("should enqueue",q.enqueue(3));
assertEquals("element",3,(int)q.dequeue());
// Empty queue: size 0, null front and back.
assertEquals("queue size",0,q.size());
assertEquals("queue front",null,q.front());
assertEquals("queue back",null,q.back());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test nonblocking enqueue when queue is full: the offer is dropped
 * (enqueue returns false) and later elements still flow through.
 * @throws Exception
 */
@Test public void testFull() throws Exception {
// FIX: restore the generic parameters lost in this file — the anonymous
// consume(Integer) below does not override raw Consumer's erased method.
final SinkQueue<Integer> q=new SinkQueue<Integer>(1);
q.enqueue(1);
// Capacity 1 and already holding an element => the enqueue must drop.
assertTrue("should drop",!q.enqueue(2));
assertEquals("element",1,(int)q.dequeue());
q.enqueue(3);
q.consume(new Consumer<Integer>(){
@Override public void consume( Integer e){
assertEquals("element",3,(int)e);
}
}
);
assertEquals("queue size",0,q.size());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test consumers that take their time: while a sleeping consumer holds the
 * queue, a nonblocking enqueue on the full queue drops, and the queued
 * elements remain untouched.
 * @throws Exception
 */
@Test public void testHangingConsumer() throws Exception {
// Capacity-2 queue pre-filled with 1 and 2; the consumer is asleep.
SinkQueue q=newSleepingConsumerQueue(2,1,2);
assertEquals("queue back",2,(int)q.back());
// Queue full => nonblocking enqueue must drop.
assertTrue("should drop",!q.enqueue(3));
// The sleeping consumer must not have removed anything.
assertEquals("queue size",2,q.size());
assertEquals("queue head",1,(int)q.front());
assertEquals("queue back",2,(int)q.back());
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the consumeAll method: a full queue is drained in FIFO order and the
 * consumer is invoked exactly once per element.
 * @throws Exception
 */
@Test public void testConsumeAll() throws Exception {
final int capacity=64;
// FIX: restore the generic parameters lost in this file — the anonymous
// consume(Integer) below does not override raw Consumer's erased method.
final SinkQueue<Integer> q=new SinkQueue<Integer>(capacity);
for (int i=0; i < capacity; ++i) {
assertTrue("should enqueue",q.enqueue(i));
}
// Queue is now full; one more enqueue must be rejected.
assertTrue("should not enqueue",!q.enqueue(capacity));
final Runnable trigger=mock(Runnable.class);
q.consumeAll(new Consumer<Integer>(){
private int expected=0;
@Override public void consume( Integer e){
// Elements must arrive in insertion order: 0,1,2,...
assertEquals("element",expected++,(int)e);
trigger.run();
}
}
);
// Exactly one consumer invocation per enqueued element.
verify(trigger,times(capacity)).run();
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Check that counts and quantile estimates are correctly reset after a call
 * to {@link SampleQuantiles#clear()}.
 */
@Test public void testClear() throws IOException {
// Fill the estimator so there is real state to reset.
for (int i=0; i < 1000; i++) {
estimator.insert(i);
}
estimator.clear();
// FIX(idiom): JUnit's assertEquals takes (expected, actual); the original
// had the arguments reversed, which garbles failure messages.
assertEquals(0,estimator.getCount());
assertEquals(0,estimator.getSampleCount());
// No data => no snapshot.
assertNull(estimator.snapshot());
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Check that the counts of the number of items in the window and sample are
 * incremented correctly as items are added, and that the rendered quantile
 * summary reflects the single inserted value.
 */
@Test public void testCount() throws IOException {
// FIX(idiom): JUnit's assertEquals takes (expected, actual); the original
// had the arguments reversed, which garbles failure messages.
assertEquals(0,estimator.getCount());
assertEquals(0,estimator.getSampleCount());
// No data => no snapshot.
assertNull(estimator.snapshot());
estimator.insert(1337);
assertEquals(1,estimator.getCount());
// snapshot() folds the inserted item into the sample.
estimator.snapshot();
assertEquals(1,estimator.getSampleCount());
// With one value, every quantile reports that value.
assertEquals("50.00 %ile +/- 5.00%: 1337\n" + "75.00 %ile +/- 2.50%: 1337\n" + "90.00 %ile +/- 1.00%: 1337\n"+ "95.00 %ile +/- 0.50%: 1337\n"+ "99.00 %ile +/- 0.10%: 1337",estimator.toString());
}
Class: org.apache.hadoop.net.TestDNS
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Asking for the IPs of an interface that does not exist must raise an
 * UnknownHostException whose message names the bogus interface.
 */
@Test public void testIPsOfUnknownInterface() throws Exception {
try {
DNS.getIPs("name-of-an-unknown-interface");
}
catch ( UnknownHostException expected) {
// The exception message must identify the offending interface.
assertEquals("No such interface name-of-an-unknown-interface",expected.getMessage());
return;
}
// Reached only if getIPs returned normally — that is a failure.
fail("Got an IP for a bogus interface");
}
APIUtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Repeated lookups of the default host name must all agree, and the third
 * lookup must complete quickly — evidence that the result is cached rather
 * than re-resolved.
 * @throws Exception if hostname lookups fail
 */
@Test public void testGetLocalHostIsFast() throws Exception {
final String first=DNS.getDefaultHost(DEFAULT);
assertNotNull(first);
final String second=DNS.getDefaultHost(DEFAULT);
// Time only the third lookup, which should be served from cache.
final long start=Time.now();
final String third=DNS.getDefaultHost(DEFAULT);
final long elapsed=Time.now() - start;
assertEquals(third,second);
assertEquals(second,first);
// Generous 20s bound: a cached lookup is effectively instantaneous.
assertTrue("Took too long to determine local host - caching is not working",elapsed < 20000);
}
Class: org.apache.hadoop.net.TestNetUtils
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * createSocketAddr must parse "host:port", fall back to the supplied
 * default port when none is given, and reject an unparseable port with an
 * IllegalArgumentException that references the originating config key.
 */
@Test public void testCreateSocketAddress() throws Throwable {
// Explicit port in the address string.
InetSocketAddress withPort=NetUtils.createSocketAddr("127.0.0.1:12345",1000,"myconfig");
assertEquals("127.0.0.1",withPort.getAddress().getHostAddress());
assertEquals(12345,withPort.getPort());
// No port given => the default (1000) applies.
InetSocketAddress defaulted=NetUtils.createSocketAddr("127.0.0.1",1000,"myconfig");
assertEquals("127.0.0.1",defaulted.getAddress().getHostAddress());
assertEquals(1000,defaulted.getPort());
// A non-numeric port must be rejected, naming the config key.
try {
NetUtils.createSocketAddr("127.0.0.1:blahblah",1000,"myconfig");
fail("Should have failed to parse bad port");
}
catch ( IllegalArgumentException iae) {
assertInException(iae,"myconfig");
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Test that we can't accidentally connect back to the connecting socket due
 * to a quirk in the TCP spec.
 * This is a regression test for HADOOP-6722.
 * NOTE(review): a TCP "simultaneous connect" can let a socket connect to
 * its own ephemeral endpoint; NetUtils.connect must detect this and fail.
 * Some platforms instead reject the connect outright with "Invalid
 * argument" — both outcomes are accepted below.
 */
@Test public void testAvoidLoopbackTcpSockets() throws Exception {
Configuration conf=new Configuration();
Socket socket=NetUtils.getDefaultSocketFactory(conf).createSocket();
// Bind to an ephemeral loopback port, then try to connect to ourselves.
socket.bind(new InetSocketAddress("127.0.0.1",0));
System.err.println("local address: " + socket.getLocalAddress());
System.err.println("local port: " + socket.getLocalPort());
try {
NetUtils.connect(socket,new InetSocketAddress(socket.getLocalAddress(),socket.getLocalPort()),20000);
socket.close();
fail("Should not have connected");
}
catch ( ConnectException ce) {
// Expected path: NetUtils detected the self-connection.
System.err.println("Got exception: " + ce);
assertTrue(ce.getMessage().contains("resulted in a loopback"));
}
catch ( SocketException se) {
// Alternate platform behavior: the OS refuses the connect attempt.
assertTrue(se.getMessage().contains("Invalid argument"));
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test for {@link NetUtils#normalizeHostNames}: IP literals and
 * unresolvable names pass through unchanged, while resolvable host names
 * are rewritten to their IP addresses.
 */
@Test public void testNormalizeHostName(){
// FIX: restore List<String> (generics were lost in this file).
List<String> hosts=Arrays.asList(new String[]{"127.0.0.1","localhost","1.kanyezone.appspot.com","UnknownHost123"});
List<String> normalizedHosts=NetUtils.normalizeHostNames(hosts);
// FIX(idiom): assertEquals takes (expected, actual); the original had the
// arguments reversed, which garbles failure messages.
// An IP literal normalizes to itself.
assertEquals(hosts.get(0),normalizedHosts.get(0));
// "localhost" is resolvable, so it is rewritten...
assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
// ...to the loopback IP, i.e. the same value as hosts.get(0).
assertEquals(hosts.get(0),normalizedHosts.get(1));
// A resolvable DNS name is also rewritten to an IP.
assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
// An unresolvable name passes through unchanged.
assertEquals(hosts.get(3),normalizedHosts.get(3));
}
Class: org.apache.hadoop.net.TestNetworkTopology
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Removing every data node must leave the topology empty (no leaves, no
 * containment); the nodes are then re-added so later tests see the
 * original cluster state.
 */
@Test public void testRemove() throws Exception {
for (int i=0; i < dataNodes.length; i++) {
cluster.remove(dataNodes[i]);
}
// After removal no node may still be reported as contained.
for (int i=0; i < dataNodes.length; i++) {
assertFalse(cluster.contains(dataNodes[i]));
}
assertEquals(0,cluster.getNumOfLeaves());
// Restore the shared cluster state for subsequent tests.
for (int i=0; i < dataNodes.length; i++) {
cluster.add(dataNodes[i]);
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
// Brings up a 2-DN cluster where one datanode initially registers with an
// invalid topology mapping, then fixes the mapping and restarts that
// datanode, verifying the NameNode does not serve a stale (cached) invalid
// network location: both live DNs end up at the same network location.
@Test(timeout=180000) public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
// /a/b and /c are at different topology depths, which makes one of the
// two registrations invalid.
String racks[]={"/a/b","/c"};
String hosts[]={"foo1.example.com","foo2.example.com"};
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).racks(racks).hosts(hosts).build();
cluster.waitActive();
NamenodeProtocols nn=cluster.getNameNodeRpc();
Assert.assertNotNull(nn);
DatanodeInfo[] info;
// Poll until exactly one DN is live: the invalid one must never register,
// so seeing two live DNs is an immediate failure.
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertFalse(info.length == 2);
if (info.length == 1) {
break;
}
Thread.sleep(1000);
}
// Identify which DN came up, and remap the failed one onto the valid rack.
int validIdx=info[0].getHostName().equals(hosts[0]) ? 0 : 1;
int invalidIdx=validIdx == 1 ? 0 : 1;
StaticMapping.addNodeToRack(hosts[invalidIdx],racks[validIdx]);
LOG.info("datanode " + validIdx + " came up with network location "+ info[0].getNetworkLocation());
cluster.restartDataNode(invalidIdx);
Thread.sleep(5000);
// Poll until both DNs are live under the corrected mapping.
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
if (info.length == 2) {
break;
}
if (info.length == 0) {
LOG.info("got no valid DNs");
}
else if (info.length == 1) {
LOG.info("got one valid DN: " + info[0].getHostName() + " (at "+ info[0].getNetworkLocation()+ ")");
}
Thread.sleep(1000);
}
// Both DNs now share the valid rack; a cached invalid mapping would differ.
Assert.assertEquals(info[0].getNetworkLocation(),info[1].getNetworkLocation());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * Adding a node directly under /d1 while /d1/r1 already holds leaf nodes
 * mixes a rack and a non-rack node at the same level; the topology must
 * reject it with InvalidTopologyException.
 */
@Test public void testCreateInvalidTopology() throws Exception {
  NetworkTopology topology = new NetworkTopology();
  DatanodeDescriptor[] nodes = new DatanodeDescriptor[]{
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1","/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2","/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3","/d1")};
  topology.add(nodes[0]);
  topology.add(nodes[1]);
  try {
    topology.add(nodes[2]);
    fail("expected InvalidTopologyException");
  }
  catch (NetworkTopology.InvalidTopologyException e) {
    assertTrue(e.getMessage().startsWith("Failed to add "));
    assertTrue(e.getMessage().contains("You cannot have a rack and a non-rack node at the same " + "level of the network topology."));
  }
}
APIUtilityVerifierIterativeVerifierBranchVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This test checks that chooseRandom works for an excluded rack.
 */
@Test public void testChooseRandomExcludedRack(){
  // Pick 100 random nodes while excluding everything under rack /d2.
  // NOTE(review): generic parameters restored — a raw Map.get() would not
  // unbox to int below; confirm against pickNodesAtRandom's return type.
  Map<Node, Integer> frequency = pickNodesAtRandom(100, "~" + "/d2");
  for (int j = 0; j < dataNodes.length; j++) {
    int freq = frequency.get(dataNodes[j]);
    if (dataNodes[j].getNetworkLocation().startsWith("/d2")) {
      // Nodes on the excluded rack must never be chosen.
      assertEquals(0, freq);
    }
    else {
      // Every non-excluded node should be picked at least once in 100 draws.
      assertTrue(freq > 0);
    }
  }
}
BranchVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
 * This test checks that adding a node with invalid topology will be failed
 * with an exception to show topology is invalid.
 */
@Test public void testAddNodeWithInvalidTopology(){
try {
cluster.add(rackOnlyNode);
fail("Exception should be thrown, so we should not have reached here.");
}
catch ( Exception e) {
// Only IllegalArgumentException is acceptable; anything else fails the test.
if (!(e instanceof IllegalArgumentException)) {
fail("Expecting IllegalArgumentException, but caught:" + e);
}
// The message should identify the offending network location.
assertTrue(e.getMessage().contains("illegal network location"));
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test setting some server options.
 * @throws IOException
 */
@Test(timeout=180000) public void testServerOptions() throws Exception {
final String TEST_PATH=new File(sockDir.getDir(),"test_sock_server_options").getAbsolutePath();
DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
try {
// Halve the receive buffer size and check the setting round-trips.
int bufSize=serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
int newBufSize=bufSize / 2;
serv.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE,newBufSize);
int nextBufSize=serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
Assert.assertEquals(newBufSize,nextBufSize);
// Set a one-second receive timeout and check it round-trips too.
int newTimeout=1000;
serv.setAttribute(DomainSocket.RECEIVE_TIMEOUT,newTimeout);
int nextTimeout=serv.getAttribute(DomainSocket.RECEIVE_TIMEOUT);
Assert.assertEquals(newTimeout,nextTimeout);
try {
// With no client connecting, accept() must honor the timeout just set.
serv.accept();
Assert.fail("expected the accept() to time out and fail");
}
catch ( SocketTimeoutException e) {
GenericTestUtils.assertExceptionContains("accept(2) error: ",e);
}
}
finally {
// Closing the server socket must leave it in the closed state.
serv.close();
Assert.assertFalse(serv.isOpen());
}
}
APIUtilityVerifierIterativeVerifierBranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierPublicFieldVerifierHybridVerifier
/**
 * Test file descriptor passing.
 * <p>
 * A server thread accepts a connection, verifies the client's message, and
 * replies with a message plus two open file descriptors; the client checks
 * both the bytes and the received FileInputStreams.
 * @throws IOException
 */
@Test(timeout=180000) public void testFdPassing() throws Exception {
  final String TEST_PATH = new File(sockDir.getDir(), "test_sock").getAbsolutePath();
  final byte clientMsg1[] = new byte[]{0x11,0x22,0x33,0x44,0x55,0x66};
  final byte serverMsg1[] = new byte[]{0x31,0x30,0x32,0x34,0x31,0x33,0x44,0x1,0x1,0x1,0x1,0x1};
  // Generic parameter restored: take() below is assigned to a Throwable, so
  // the element type is Throwable (Success must extend it).
  final ArrayBlockingQueue<Throwable> threadResults = new ArrayBlockingQueue<Throwable>(2);
  final DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
  final PassedFile passedFiles[] = new PassedFile[]{new PassedFile(1), new PassedFile(2)};
  final FileDescriptor passedFds[] = new FileDescriptor[passedFiles.length];
  for (int i = 0; i < passedFiles.length; i++) {
    passedFds[i] = passedFiles[i].getInputStream().getFD();
  }
  Thread serverThread = new Thread(){
    public void run(){
      DomainSocket conn = null;
      try {
        conn = serv.accept();
        byte in1[] = new byte[clientMsg1.length];
        InputStream connInputStream = conn.getInputStream();
        IOUtils.readFully(connInputStream, in1, 0, in1.length);
        Assert.assertTrue(Arrays.equals(clientMsg1, in1));
        DomainSocket domainConn = (DomainSocket)conn;
        // Send the reply bytes along with the open file descriptors.
        domainConn.sendFileDescriptors(passedFds, serverMsg1, 0, serverMsg1.length);
        conn.close();
      }
      catch (Throwable e) {
        threadResults.add(e);
        // This fail() throws out of the thread; the main loop still sees the
        // queued Throwable above.
        Assert.fail(e.getMessage());
      }
      threadResults.add(new Success());
    }
  };
  serverThread.start();
  Thread clientThread = new Thread(){
    public void run(){
      try {
        DomainSocket client = DomainSocket.connect(TEST_PATH);
        OutputStream clientOutputStream = client.getOutputStream();
        InputStream clientInputStream = client.getInputStream();
        clientOutputStream.write(clientMsg1);
        DomainSocket domainConn = (DomainSocket)client;
        byte in1[] = new byte[serverMsg1.length];
        FileInputStream recvFis[] = new FileInputStream[passedFds.length];
        // Deliberately short read: the remaining bytes must still arrive on
        // the normal input stream after the fds are received.
        int r = domainConn.recvFileInputStreams(recvFis, in1, 0, in1.length - 1);
        Assert.assertTrue(r > 0);
        IOUtils.readFully(clientInputStream, in1, r, in1.length - r);
        Assert.assertTrue(Arrays.equals(serverMsg1, in1));
        for (int i = 0; i < passedFds.length; i++) {
          Assert.assertNotNull(recvFis[i]);
          passedFiles[i].checkInputStream(recvFis[i]);
        }
        for (FileInputStream fis : recvFis) {
          fis.close();
        }
        client.close();
      }
      catch (Throwable e) {
        // NOTE(review): unlike the server thread, a Success is still queued
        // after a failure here, so the main loop can drain the Success before
        // the Throwable — confirm whether an early return was intended.
        threadResults.add(e);
      }
      threadResults.add(new Success());
    }
  };
  clientThread.start();
  // Wait for both threads to report; any non-Success result fails the test.
  for (int i = 0; i < 2; i++) {
    Throwable t = threadResults.take();
    if (!(t instanceof Success)) {
      Assert.fail(t.getMessage() + ExceptionUtils.getStackTrace(t));
    }
  }
  serverThread.join(120000);
  clientThread.join(120000);
  serv.close();
  for (PassedFile pf : passedFiles) {
    pf.cleanup();
  }
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Tests DomainSocket#shutdown: a blocked reader must observe EOF (read()
// returning -1) after the peer side shuts down, without an IOException.
@Test(timeout=180000) public void testShutdown() throws Exception {
final AtomicInteger bytesRead=new AtomicInteger(0);
final AtomicBoolean failed=new AtomicBoolean(false);
final DomainSocket[] socks=DomainSocket.socketpair();
Runnable reader=new Runnable(){
@Override public void run(){
while (true) {
try {
// Blocks until a byte arrives; -1 signals the peer shut down.
int ret=socks[1].getInputStream().read();
if (ret == -1) return;
bytesRead.addAndGet(1);
}
catch ( IOException e) {
// Any IOException is a failure, recorded for the main thread to assert on.
DomainSocket.LOG.error("reader error",e);
failed.set(true);
return;
}
}
}
}
;
Thread readerThread=new Thread(reader);
readerThread.start();
socks[0].getOutputStream().write(1);
socks[0].getOutputStream().write(2);
socks[0].getOutputStream().write(3);
Assert.assertTrue(readerThread.isAlive());
// Shut down the writing side: the reader should see EOF and exit cleanly.
socks[0].shutdown();
readerThread.join();
Assert.assertFalse(failed.get());
Assert.assertEquals(3,bytesRead.get());
IOUtils.cleanup(null,socks);
}
TestInitializerAssumptionSetterHybridVerifier
@Before public void before(){
  // Skip the whole suite when native DomainSocket support failed to load.
  String loadFailure = DomainSocket.getLoadingFailureReason();
  Assume.assumeTrue(loadFailure == null);
}
BranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
// Tests Groups caching: a cached lookup keeps serving even after the
// underlying mapping (FakeGroupMapping) starts failing for that user,
// while an uncached blacklisted user fails with "No groups found".
@Test public void testGroupsCaching() throws Exception {
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,0);
Groups groups=new Groups(conf);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.clearBlackList();
FakeGroupMapping.addToBlackList("user1");
// "me" is not blacklisted yet, so this lookup populates the cache.
assertTrue(groups.getGroups("me").size() == 2);
// Blacklisting "me" afterwards has no effect: the cached entry serves.
FakeGroupMapping.addToBlackList("me");
assertTrue(groups.getGroups("me").size() == 2);
try {
// "user1" was never cached, so its blacklisted lookup must throw.
LOG.error("We are not supposed to get here." + groups.getGroups("user1").toString());
fail();
}
catch ( IOException ioe) {
if (!ioe.getMessage().startsWith("No groups found")) {
LOG.error("Got unexpected exception: " + ioe.getMessage());
fail();
}
}
// With the blacklist cleared (and negative caching disabled above), the
// lookup succeeds again.
FakeGroupMapping.clearBlackList();
assertTrue(groups.getGroups("user1").size() == 2);
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Tests the negative groups cache: a failed lookup is cached for the
// configured interval (2s, driven by a FakeTimer) and keeps failing even
// after the mapping recovers, until the cached entry expires.
@Test public void testNegativeGroupCaching() throws Exception {
final String user="negcache";
final String failMessage="Did not throw IOException: ";
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,2);
FakeTimer timer=new FakeTimer();
Groups groups=new Groups(conf,timer);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.addToBlackList(user);
try {
// First lookup fails against the mapping itself.
groups.getGroups(user);
fail(failMessage + "Failed to obtain groups from FakeGroupMapping.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user",e);
}
try {
// Second lookup fails from the negative cache without hitting the mapping.
groups.getGroups(user);
fail(failMessage + "The user is in the negative cache.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user",e);
}
FakeGroupMapping.clearBlackList();
try {
// The mapping has recovered, but the negative cache entry is still live.
groups.getGroups(user);
fail(failMessage + "The user is still in the negative cache, even " + "FakeGroupMapping has resumed.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user",e);
}
// Advance past the 2s negative-cache TTL; the lookup now succeeds.
timer.advance(4 * 1000);
assertEquals(Arrays.asList(myGroups),groups.getGroups(user));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that "-refreshUserToGroupsMappings" invalidates the groups
 * cache: lookups before the refresh agree, while lookups after the refresh
 * command (and again after the cache timeout) return different groups.
 */
@Test public void testGroupMappingRefresh() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[]{"-refreshUserToGroupsMappings"};
  Groups groups = Groups.getUserToGroupsMappingService(config);
  String user = UserGroupInformation.getCurrentUser().getUserName();
  System.out.println("first attempt:");
  // Generic parameters restored: the elements are copied into a String[],
  // so getGroups returns List<String>.
  List<String> g1 = groups.getGroups(user);
  String[] str_groups = new String[g1.size()];
  g1.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  System.out.println("second attempt, should be same:");
  List<String> g2 = groups.getGroups(user);
  g2.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g2.size(); i++) {
    assertEquals("Should be same group ", g1.get(i), g2.get(i));
  }
  admin.run(args);
  System.out.println("third attempt(after refresh command), should be different:");
  List<String> g3 = groups.getGroups(user);
  g3.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g3.size(); i++) {
    assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i), g1.get(i).equals(g3.get(i)));
  }
  // Let the cache expire naturally and check the mapping changes again.
  Thread.sleep(groupRefreshTimeoutSec * 1100);
  System.out.println("fourth attempt(after timeout), should be different:");
  List<String> g4 = groups.getGroups(user);
  g4.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g4.size(); i++) {
    assertFalse("Should be different group ", g3.get(i).equals(g4.get(i)));
  }
}
IterativeVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* This test checks a race condition between getting and adding tokens for
* the current user. Calling UserGroupInformation.getCurrentUser() returns
* a new object each time, so simply making these methods synchronized was not
* enough to prevent race conditions and causing a
* ConcurrentModificationException. These methods are synchronized on the
* Subject, which is the same object between UserGroupInformation instances.
* This test tries to cause a CME, by exposing the race condition. Previously
* this test would fail every time; now it does not.
*/
@Test public void testTokenRaceCondition() throws Exception {
UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
userGroupInfo.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
assertNotEquals(UserGroupInformation.getLoginUser(),UserGroupInformation.getCurrentUser());
GetTokenThread thread=new GetTokenThread();
try {
thread.start();
for (int i=0; i < 100; i++) {
@SuppressWarnings("unchecked") Token extends TokenIdentifier> t=mock(Token.class);
when(t.getService()).thenReturn(new Text("t" + i));
UserGroupInformation.getCurrentUser().addToken(t);
assertNull("ConcurrentModificationException encountered",thread.cme);
}
}
catch ( ConcurrentModificationException cme) {
cme.printStackTrace();
fail("ConcurrentModificationException encountered");
}
finally {
thread.runThread=false;
thread.join(5 * 1000);
}
return null;
}
}
);
}
APIUtilityVerifierBranchVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that getPrincipalNames with a pattern returns exactly the
 * keytab principals matching "HTTP/.*" — no more, no fewer.
 */
@Test public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
  // Create a keytab holding all test principals.
  createKeyTab(testKeytab, testPrincipals);
  Pattern httpPattern = Pattern.compile("HTTP/.*");
  String[] httpPrincipals = KerberosUtil.getPrincipalNames(testKeytab, httpPattern);
  Assert.assertNotNull("principals cannot be null", httpPrincipals);
  int expectedSize = 0;
  // Generic parameter restored: httpPrincipals is a String[].
  List<String> httpPrincipalList = Arrays.asList(httpPrincipals);
  for (String principal : testPrincipals) {
    if (httpPattern.matcher(principal).matches()) {
      Assert.assertTrue("missing principal " + principal, httpPrincipalList.contains(principal));
      expectedSize++;
    }
  }
  // The result must not contain any extra, non-matching principals.
  Assert.assertEquals(expectedSize, httpPrincipals.length);
}
APIUtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that getPrincipalNames without a pattern returns every
 * principal written to the keytab, and nothing else.
 */
@Test public void testGetPrincipalNamesFromKeytab() throws IOException {
  createKeyTab(testKeytab, testPrincipals);
  String[] principals = KerberosUtil.getPrincipalNames(testKeytab);
  Assert.assertNotNull("principals cannot be null", principals);
  int expectedSize = 0;
  // Generic parameter restored: principals is a String[].
  List<String> principalList = Arrays.asList(principals);
  for (String principal : testPrincipals) {
    Assert.assertTrue("missing principal " + principal, principalList.contains(principal));
    expectedSize++;
  }
  Assert.assertEquals(expectedSize, principals.length);
}
IterativeVerifierBranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Starts a chain of services of which one is rigged to throw on start, and
// verifies the rollback: services that had started are stopped, while the
// never-started tail service remains INITED.
@Test public void testServiceStartup(){
ServiceManager serviceManager=new ServiceManager("ServiceManager");
for (int i=0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service=new CompositeServiceImpl(i);
if (i == FAILED_SERVICE_SEQ_NUMBER) {
// This service throws from its start phase.
service.setThrowExceptionOnStart(true);
}
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services=serviceManager.getServices().toArray(new CompositeServiceImpl[0]);
Configuration conf=new Configuration();
serviceManager.init(conf);
try {
serviceManager.start();
fail("Exception should have been thrown due to startup failure of last service");
}
catch ( ServiceTestRuntimeException e) {
for (int i=0; i < NUM_OF_SERVICES - 1; i++) {
if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) {
// NOTE(review): this asserts on services[NUM_OF_SERVICES - 1] rather than
// services[i]; presumably only the never-started tail service is expected
// to stay INITED — confirm the index is intentional.
assertEquals("Service state should have been ",STATE.INITED,services[NUM_OF_SERVICES - 1].getServiceState());
}
else {
assertEquals("Service state should have been ",STATE.STOPPED,services[i].getServiceState());
}
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies removeService: after adding three services (and rejecting a
 * non-Service object) and removing one, exactly two remain.
 */
@Test public void testRemoveService(){
  CompositeService testService = new CompositeService("TestService"){
    @Override public void serviceInit( Configuration conf){
      // Integer.valueOf replaces the deprecated new Integer(int) constructor.
      Integer notAService = Integer.valueOf(0);
      assertFalse("Added an integer as a service", addIfService(notAService));
      Service service1 = new AbstractService("Service1"){
      };
      addIfService(service1);
      Service service2 = new AbstractService("Service2"){
      };
      addIfService(service2);
      Service service3 = new AbstractService("Service3"){
      };
      addIfService(service3);
      // Removing one of the three added services leaves two.
      removeService(service1);
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services", 2, testService.getServices().size());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies addIfService: a non-Service object is rejected, while a real
 * Service is added and counted.
 */
@Test(timeout=1000) public void testAddIfService(){
  CompositeService testService = new CompositeService("TestService"){
    Service service;
    @Override public void serviceInit( Configuration conf){
      // Integer.valueOf replaces the deprecated new Integer(int) constructor.
      Integer notAService = Integer.valueOf(0);
      assertFalse("Added an integer as a service", addIfService(notAService));
      service = new AbstractService("Service"){
      };
      assertTrue("Unable to add a service", addIfService(service));
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services", 1, testService.getServices().size());
}
InternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test that the {@link BreakableStateChangeListener} is picking up
 * the state changes and that its last event field is as expected.
 */
@Test public void testEventHistory(){
register();
BreakableService service=new BreakableService();
// Before any lifecycle transition: listener at NOTINITED with zero events.
assertListenerState(listener,Service.STATE.NOTINITED);
assertEquals(0,listener.getEventCount());
// Each transition (init/start/stop) must bump the event count by one and
// record the service that fired it.
service.init(new Configuration());
assertListenerState(listener,Service.STATE.INITED);
assertSame(service,listener.getLastService());
assertListenerEventCount(listener,1);
service.start();
assertListenerState(listener,Service.STATE.STARTED);
assertListenerEventCount(listener,2);
service.stop();
assertListenerState(listener,Service.STATE.STOPPED);
assertListenerEventCount(listener,3);
}
UtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Show that if the service failed during an init
 * operation, stop was called.
 */
@Test public void testStopFailingInitAndStop() throws Throwable {
// BreakableService(true,false,true): breaks on init AND on stop.
BreakableService svc=new BreakableService(true,false,true);
svc.registerServiceListener(new LoggingStateChangeListener());
try {
svc.init(new Configuration());
fail("Expected a failure, got " + svc);
}
catch ( BreakableService.BrokenLifecycleEvent e) {
assertEquals(Service.STATE.INITED,e.state);
}
// Despite the failed init, the service must have been moved to STOPPED.
assertServiceStateStopped(svc);
assertEquals(Service.STATE.INITED,svc.getFailureState());
// The recorded failure cause must be the init-phase lifecycle event.
Throwable failureCause=svc.getFailureCause();
assertNotNull("Null failure cause in " + svc,failureCause);
BreakableService.BrokenLifecycleEvent cause=(BreakableService.BrokenLifecycleEvent)failureCause;
assertNotNull("null state in " + cause + " raised by "+ svc,cause.state);
assertEquals(Service.STATE.INITED,cause.state);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This test verifies that you can block waiting for something to happen
 * and use notifications to manage it
 * @throws Throwable on a failure
 */
@Test public void testListenerWithNotifications() throws Throwable {
// The service stops itself asynchronously ~2s after start.
AsyncSelfTerminatingService service=new AsyncSelfTerminatingService(2000);
NotifyingListener listener=new NotifyingListener();
service.registerServiceListener(listener);
service.init(new Configuration());
service.start();
assertServiceInState(service,Service.STATE.STARTED);
long start=System.currentTimeMillis();
// Block (bounded at 20s) until the listener is notified of a state change.
synchronized (listener) {
listener.wait(20000);
}
long duration=System.currentTimeMillis() - start;
assertEquals(Service.STATE.STOPPED,listener.notifyingState);
assertServiceInState(service,Service.STATE.STOPPED);
// The ~2s self-termination should wake us well before the 20s timeout.
assertTrue("Duration of " + duration + " too long",duration < 10000);
}
TestCleanerBranchVerifierUtilityVerifierHybridVerifier
/**
 * Post-test cleanup: the test itself is expected to have removed /meta.
 * If the folder is still present, delete it (so later tests start clean)
 * and fail the test to flag the leftover state.
 */
@After public void cleanupMetaFolder(){
Path meta=new Path("/meta");
try {
if (cluster.getFileSystem().exists(meta)) {
cluster.getFileSystem().delete(meta,true);
Assert.fail("Expected meta folder to be deleted");
}
}
catch ( IOException e) {
LOG.error("Exception encountered while cleaning up folder",e);
Assert.fail("Unable to clean up meta folder");
}
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Copies files whose source block size differs from the target default,
 * WITHOUT preserving block size and WITHOUT -skipCrc, and verifies the
 * copy fails with a message suggesting both options.
 */
@Test(timeout=40000) public void testCopyFailOnBlockSizeDifference(){
  try {
    // Start from a clean target and a source with a different block size.
    deleteState();
    createSourceDataWithDifferentBlockSize();
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    // NOTE(review): Mapper.Context's type arguments were stripped by
    // extraction; left raw — confirm against StubContext.getContext().
    Mapper.Context context = stubContext.getContext();
    Configuration configuration = context.getConfiguration();
    // Preserve no attributes: block size differs and is not preserved, so
    // the post-copy checksum comparison must fail.
    // (EnumSet generic parameter restored; noneOf returns EnumSet<E>.)
    EnumSet<DistCpOptions.FileAttribute> fileAttributes = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(), DistCpUtils.packAttributes(fileAttributes));
    copyMapper.setup(context);
    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)), new CopyListingFileStatus(fileStatus), context);
    }
    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // The failure message must point the user at the -pb and -skipCrc options.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.", exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.", exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * If a single file is being copied to a location where the file (of the same
 * name) already exists, then the file shouldn't be skipped.
 */
@Test(timeout=40000) public void testSingleFileCopy(){
  try {
    deleteState();
    touchFile(SOURCE_PATH + "/1");
    Path sourceFilePath = pathList.get(0);
    Path targetFilePath = new Path(sourceFilePath.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
    touchFile(targetFilePath.toString());
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper.Context context = stubContext.getContext();
    // First pass: the final path is the PARENT directory, so the
    // identically-named up-to-date file is detected and the copy skipped.
    context.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetFilePath.getParent().toString());
    copyMapper.setup(context);
    final CopyListingFileStatus sourceFileStatus = new CopyListingFileStatus(fs.getFileStatus(sourceFilePath));
    long before = fs.getFileStatus(targetFilePath).getModificationTime();
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
    long after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been skipped", before == after);
    // Second pass: the final path is the FILE itself, so it must be overwritten.
    context.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetFilePath.toString());
    copyMapper.setup(context);
    before = fs.getFileStatus(targetFilePath).getModificationTime();
    try {
      // Ensure the overwrite gets a strictly later modification time.
      Thread.sleep(2);
    }
    catch (Throwable ignore) {
    }
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
    after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been overwritten.", before < after);
  }
  catch (Exception exception) {
    // Print the stack trace BEFORE failing: Assert.fail() throws, so the
    // original statement order made printStackTrace() unreachable.
    exception.printStackTrace();
    Assert.fail("Unexpected exception: " + exception.getMessage());
  }
}
APIUtilityVerifierIterativeVerifierBranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierHybridVerifier
/**
 * There should be files in the directory named by
 * ${test.build.data}/rumen/histogram-test .
 * There will be pairs of files, inputXxx.json and goldXxx.json .
 * We read the input file as a HistogramRawTestData in json. Then we
 * create a Histogram using the data field, and then a
 * LoggedDiscreteCDF using the percentiles and scale field. Finally,
 * we read the corresponding goldXxx.json as a LoggedDiscreteCDF and
 * deepCompare them.
 * @throws IOException if a test input or gold file cannot be read
 */
@Test public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir = new Path(System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");
  FileStatus[] tests = lfs.listStatus(rootInputFile);
  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      // Each inputXxx.json has a matching goldXxx.json expected result.
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold" + testName);
      // Typo fixed in assertion message ("dies" -> "does").
      assertTrue("Gold file does not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      // Generic parameter restored: a raw parser's getNext() could not be
      // assigned to LoggedDiscreteCDF below.
      JsonObjectMapperParser<LoggedDiscreteCDF> parser = new JsonObjectMapperParser<LoggedDiscreteCDF>(goldStream, LoggedDiscreteCDF.class);
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        dcdf.deepCompare(newResult, new TreePath(null, ""));
      }
      catch (DeepInequalityException e) {
        fail(e.path.toString());
      }
      finally {
        parser.close();
      }
    }
  }
}
APIUtilityVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * A resource bundled only in the test jar must be visible through the
 * ApplicationClassLoader but not through its parent.
 */
@Test public void testGetResource() throws IOException {
  URL jarUrl = makeTestJar().toURI().toURL();
  ClassLoader parent = getClass().getClassLoader();
  ClassLoader appClassloader = new ApplicationClassLoader(new URL[]{jarUrl}, parent, null);
  assertNull("Resource should be null for current classloader", parent.getResourceAsStream("resource.txt"));
  InputStream stream = appClassloader.getResourceAsStream("resource.txt");
  assertNotNull("Resource should not be null for app classloader", stream);
  assertEquals("hello", IOUtils.toString(stream));
}
BranchVerifierUtilityVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * This test creates some ExampleTasks and runs them.
 */
@Test public void testAsyncDiskService() throws Throwable {
  String[] volumes = new String[]{"/0","/1"};
  AsyncDiskService service = new AsyncDiskService(volumes);
  // Alternate 100 tasks across the two volumes.
  int total = 100;
  for (int i = 0; i < total; i++) {
    service.execute(volumes[i % 2], new ExampleTask());
  }
  // Submitting to an unknown volume must be rejected.
  RuntimeException rejected = null;
  try {
    service.execute("no_such_volume", new ExampleTask());
  }
  catch (RuntimeException ex) {
    rejected = ex;
  }
  assertNotNull("Executing a task on a non-existing volume should throw an " + "Exception.", rejected);
  service.shutdown();
  if (!service.awaitTermination(5000)) {
    fail("AsyncDiskService didn't shutdown in 5 seconds.");
  }
  // Every task increments the shared counter exactly once.
  assertEquals(total, count);
}
Class: org.apache.hadoop.util.TestClassUtil
BooleanVerifierNullVerifierHybridVerifier
@Test(timeout=1000) public void testFindContainingJar(){
  // Locate the jar on the classpath that provides log4j's Logger class.
  String containingJar = ClassUtil.findContainingJar(Logger.class);
  Assert.assertNotNull("Containing jar not found for Logger", containingJar);
  File jar = new File(containingJar);
  Assert.assertTrue("Containing jar does not exist on file system", jar.exists());
  Assert.assertTrue("Incorrect jar file" + containingJar, jar.getName().matches("log4j.+[.]jar"));
}
IterativeVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Inserts 1000 keys, verifies they are all visible via visitAll, removes
 * them all, and checks the store ends up empty with its grown capacity.
 */
@Test(timeout=60000) public void testAdditionsAndRemovals(){
  // Generic parameters restored: raw remove() could not be compared against
  // Integer.valueOf(i) below (stripped angle brackets in extraction).
  IdentityHashStore<Key, Integer> store = new IdentityHashStore<Key, Integer>(0);
  final int NUM_KEYS = 1000;
  LOG.debug("generating " + NUM_KEYS + " keys");
  final List<Key> keys = new ArrayList<Key>(NUM_KEYS);
  for (int i = 0; i < NUM_KEYS; i++) {
    keys.add(new Key("key " + i));
  }
  for (int i = 0; i < NUM_KEYS; i++) {
    store.put(keys.get(i), i);
  }
  // Every visited key must be one we inserted.
  store.visitAll(new Visitor<Key, Integer>(){
    @Override public void accept( Key k, Integer v){
      Assert.assertTrue(keys.contains(k));
    }
  });
  for (int i = 0; i < NUM_KEYS; i++) {
    Assert.assertEquals(Integer.valueOf(i), store.remove(keys.get(i)));
  }
  // After removing everything, visitAll must not call back at all.
  store.visitAll(new Visitor<Key, Integer>(){
    @Override public void accept( Key k, Integer v){
      Assert.fail("expected all entries to be removed");
    }
  });
  Assert.assertTrue("expected the store to be " + "empty, but found " + store.numElements() + " elements.", store.isEmpty());
  Assert.assertEquals(1024, store.capacity());
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * A store created with zero capacity must behave correctly: empty visits,
 * then a single put/get/remove cycle.
 */
@Test(timeout=60000) public void testStartingWithZeroCapacity(){
  // Generic parameters restored (stripped angle brackets in extraction).
  IdentityHashStore<Key, Integer> store = new IdentityHashStore<Key, Integer>(0);
  store.visitAll(new Visitor<Key, Integer>(){
    @Override public void accept( Key k, Integer v){
      Assert.fail("found key " + k + " in empty IdentityHashStore.");
    }
  });
  Assert.assertTrue(store.isEmpty());
  final Key key1 = new Key("key1");
  // Integer.valueOf replaces the deprecated new Integer(int) constructor.
  Integer value1 = Integer.valueOf(100);
  store.put(key1, value1);
  Assert.assertTrue(!store.isEmpty());
  Assert.assertEquals(value1, store.get(key1));
  // The single stored entry must be the one we put.
  store.visitAll(new Visitor<Key, Integer>(){
    @Override public void accept( Key k, Integer v){
      Assert.assertEquals(key1, k);
    }
  });
  Assert.assertEquals(value1, store.remove(key1));
  Assert.assertTrue(store.isEmpty());
}
IterativeVerifierUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Duplicate puts under the same (identity) key accumulate rather than
 * replace, and an equal-but-distinct key never matches.
 */
@Test(timeout=60000) public void testDuplicateInserts(){
  // Generic parameters restored (stripped angle brackets in extraction).
  IdentityHashStore<Key, Integer> store = new IdentityHashStore<Key, Integer>(4);
  store.visitAll(new Visitor<Key, Integer>(){
    @Override public void accept( Key k, Integer v){
      Assert.fail("found key " + k + " in empty IdentityHashStore.");
    }
  });
  Assert.assertTrue(store.isEmpty());
  Key key1 = new Key("key1");
  // Integer.valueOf replaces the deprecated new Integer(int) constructor.
  Integer value1 = Integer.valueOf(100);
  Integer value2 = Integer.valueOf(200);
  Integer value3 = Integer.valueOf(300);
  store.put(key1, value1);
  // An equal-but-distinct key must miss: the store is identity-based.
  Key equalToKey1 = new Key("key1");
  Assert.assertNull(store.get(equalToKey1));
  Assert.assertTrue(!store.isEmpty());
  Assert.assertEquals(value1, store.get(key1));
  store.put(key1, value2);
  store.put(key1, value3);
  final List<Integer> allValues = new LinkedList<Integer>();
  store.visitAll(new Visitor<Key, Integer>(){
    @Override public void accept( Key k, Integer v){
      allValues.add(v);
    }
  });
  Assert.assertEquals(3, allValues.size());
  // Each remove() pops one of the three stored values, in unspecified order.
  for (int i = 0; i < 3; i++) {
    Integer value = store.remove(key1);
    Assert.assertTrue(allValues.remove(value));
  }
  Assert.assertNull(store.remove(key1));
  Assert.assertTrue(store.isEmpty());
}
BranchVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
 * When native testing is required on this platform, verifies libhadoop.so
 * loaded and that every compiled-in native library reports a name.
 */
@Test public void testNativeCodeLoaded(){
  // Idiom fix: "!requireTestJni()" replaces "requireTestJni() == false".
  if (!requireTestJni()) {
    LOG.info("TestNativeCodeLoader: libhadoop.so testing is not required.");
    return;
  }
  if (!NativeCodeLoader.isNativeCodeLoaded()) {
    fail("TestNativeCodeLoader: libhadoop.so testing was required, but " + "libhadoop.so was not loaded.");
  }
  assertFalse(NativeCodeLoader.getLibraryName().isEmpty());
  assertFalse(ZlibFactory.getLibraryName().isEmpty());
  // Snappy/OpenSSL names are only checked when the build compiled them in.
  if (NativeCodeLoader.buildSupportsSnappy()) {
    assertFalse(SnappyCodec.getLibraryName().isEmpty());
  }
  if (NativeCodeLoader.buildSupportsOpenssl()) {
    assertFalse(OpensslCipher.getLibraryName().isEmpty());
  }
  assertFalse(Lz4Codec.getLibraryName().isEmpty());
  LOG.info("TestNativeCodeLoader: libhadoop.so is loaded.");
}
UtilityVerifierEqualityVerifierHybridVerifier
@Test(timeout=30000) public void testStringToURI(){
  // "file://" is not a valid URI for this helper; conversion must be rejected
  // with a descriptive IllegalArgumentException.
  String[] badUris = new String[]{"file://"};
  try {
    StringUtils.stringToURI(badUris);
    fail("Ignoring URISyntaxException while creating URI from string file://");
  }
  catch (IllegalArgumentException iae) {
    assertEquals("Failed to create uri for file://", iae.getMessage());
  }
}
IterativeVerifierUtilityVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Exhaustively exercises StringUtils.TraditionalBinaryPrefix: string2long for
 * every binary prefix (k/m/g/t/p/e, case-insensitive, signed), rejection of
 * out-of-range and malformed inputs, long2String rounding and decimal-place
 * formatting, and the derived byteDesc / formatPercent helpers.
 */
@Test(timeout=30000) public void testTraditionalBinaryPrefix() throws Exception {
// Each prefix is a successive power of 1024: 1k=1024, 1m=1024^2, ... 1e=1024^6.
String[] symbol={"k","m","g","t","p","e"};
long m=1024;
for ( String s : symbol) {
assertEquals(0,string2long(0 + s));
assertEquals(m,string2long(1 + s));
m*=1024;
}
// Spot checks: prefixes are case-insensitive and the sign is preserved.
assertEquals(0L,string2long("0"));
assertEquals(1024L,string2long("1k"));
assertEquals(-1024L,string2long("-1k"));
assertEquals(1259520L,string2long("1230K"));
assertEquals(-1259520L,string2long("-1230K"));
assertEquals(104857600L,string2long("100m"));
assertEquals(-104857600L,string2long("-100M"));
assertEquals(956703965184L,string2long("891g"));
assertEquals(-956703965184L,string2long("-891G"));
assertEquals(501377302265856L,string2long("456t"));
assertEquals(-501377302265856L,string2long("-456T"));
assertEquals(11258999068426240L,string2long("10p"));
assertEquals(-11258999068426240L,string2long("-10P"));
assertEquals(1152921504606846976L,string2long("1e"));
assertEquals(-1152921504606846976L,string2long("-1E"));
// 10e = 10 * 2^60 overflows a signed long in either direction.
String tooLargeNumStr="10e";
try {
string2long(tooLargeNumStr);
fail("Test passed for a number " + tooLargeNumStr + " too large");
}
catch ( IllegalArgumentException e) {
assertEquals(tooLargeNumStr + " does not fit in a Long",e.getMessage());
}
String tooSmallNumStr="-10e";
try {
string2long(tooSmallNumStr);
fail("Test passed for a number " + tooSmallNumStr + " too small");
}
catch ( IllegalArgumentException e) {
assertEquals(tooSmallNumStr + " does not fit in a Long",e.getMessage());
}
// Only the first character after the digits may be a prefix; 'b' is invalid.
String invalidFormatNumStr="10kb";
char invalidPrefix='b';
try {
string2long(invalidFormatNumStr);
fail("Test passed for a number " + invalidFormatNumStr + " has invalid format");
}
catch ( IllegalArgumentException e) {
assertEquals("Invalid size prefix '" + invalidPrefix + "' in '"+ invalidFormatNumStr+ "'. Allowed prefixes are k, m, g, t, p, e(case insensitive)",e.getMessage());
}
// long2String: values below 1K print as plain integers regardless of
// decimal places; exact multiples of 1K carry no fractional part.
assertEquals("0",long2String(0,null,2));
for (int decimalPlace=0; decimalPlace < 2; decimalPlace++) {
for (int n=1; n < TraditionalBinaryPrefix.KILO.value; n++) {
assertEquals(n + "",long2String(n,null,decimalPlace));
assertEquals(-n + "",long2String(-n,null,decimalPlace));
}
assertEquals("1 K",long2String(1L << 10,null,decimalPlace));
assertEquals("-1 K",long2String(-1L << 10,null,decimalPlace));
}
// Extremes: Long.MAX_VALUE rounds up to 8 E; Long.MIN_VALUE is exactly -8 E.
assertEquals("8.00 E",long2String(Long.MAX_VALUE,null,2));
assertEquals("8.00 E",long2String(Long.MAX_VALUE - 1,null,2));
assertEquals("-8 E",long2String(Long.MIN_VALUE,null,2));
assertEquals("-8.00 E",long2String(Long.MIN_VALUE + 1,null,2));
// For each power of two >= 2^11, the chosen prefix is values()[e/10 - 1];
// +/-1 around the exact power exercises rounding with 0, 1 and 2 decimals.
final String[] zeros={" ",".0 ",".00 "};
for (int decimalPlace=0; decimalPlace < zeros.length; decimalPlace++) {
final String trailingZeros=zeros[decimalPlace];
for (int e=11; e < Long.SIZE - 1; e++) {
final TraditionalBinaryPrefix p=TraditionalBinaryPrefix.values()[e / 10 - 1];
{
final long n=1L << e;
final String expected=(n / p.value) + " " + p.symbol;
assertEquals("n=" + n,expected,long2String(n,null,2));
}
{
final long n=(1L << e) + 1;
final String expected=(n / p.value) + trailingZeros + p.symbol;
assertEquals("n=" + n,expected,long2String(n,null,decimalPlace));
}
{
final long n=(1L << e) - 1;
final String expected=((n + 1) / p.value) + trailingZeros + p.symbol;
assertEquals("n=" + n,expected,long2String(n,null,decimalPlace));
}
}
}
// Fractional values at various decimal-place settings (3*2^9 = 1.5K etc.).
assertEquals("1.50 K",long2String(3L << 9,null,2));
assertEquals("1.5 K",long2String(3L << 9,null,1));
assertEquals("1.50 M",long2String(3L << 19,null,2));
assertEquals("2 M",long2String(3L << 19,null,0));
assertEquals("3 G",long2String(3L << 30,null,2));
// byteDesc appends "B" and drops trailing zero decimals for exact values.
assertEquals("0 B",StringUtils.byteDesc(0));
assertEquals("-100 B",StringUtils.byteDesc(-100));
assertEquals("1 KB",StringUtils.byteDesc(1024));
assertEquals("1.50 KB",StringUtils.byteDesc(3L << 9));
assertEquals("1.50 MB",StringUtils.byteDesc(3L << 19));
assertEquals("3 GB",StringUtils.byteDesc(3L << 30));
// formatPercent honors the requested number of decimal places exactly.
assertEquals("10%",StringUtils.formatPercent(0.1,0));
assertEquals("10.0%",StringUtils.formatPercent(0.1,1));
assertEquals("10.00%",StringUtils.formatPercent(0.1,2));
assertEquals("1%",StringUtils.formatPercent(0.00543,0));
assertEquals("0.5%",StringUtils.formatPercent(0.00543,1));
assertEquals("0.54%",StringUtils.formatPercent(0.00543,2));
assertEquals("0.543%",StringUtils.formatPercent(0.00543,3));
assertEquals("0.5430%",StringUtils.formatPercent(0.00543,4));
}
Class: org.apache.hadoop.util.TestWinUtils
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Validates the output of {@code winutils ls} on a freshly written file:
 * the default space-separated listing (mode string first, path last) and the
 * "-F" pipe-separated listing with exactly 9 fields including the byte size.
 */
@Test(timeout = 30000)
public void testLs() throws IOException {
  final String content = "6bytes";
  final int contentSize = content.length();
  File testFile = new File(TEST_DIR, "file1");
  writeFile(testFile, content);

  // Default listing: whitespace-separated; first token is the mode string,
  // last token is the canonical path.
  String output = Shell.execCommand(Shell.WINUTILS, "ls", testFile.getCanonicalPath());
  String[] outputArgs = output.split("[ \r\n]");
  // IDIOM FIX: assertEquals reports expected-vs-actual on failure, unlike
  // the previous assertTrue(a.equals(b)) which only said "false".
  assertEquals("-rwx------", outputArgs[0]);
  assertEquals(testFile.getCanonicalPath(), outputArgs[outputArgs.length - 1]);

  // "-F" listing: pipe-separated, 9 fields; field 4 is the size in bytes
  // and field 8 is the path.
  output = Shell.execCommand(Shell.WINUTILS, "ls", "-F", testFile.getCanonicalPath());
  outputArgs = output.split("[|\r\n]");
  assertEquals(9, outputArgs.length);
  assertEquals("-rwx------", outputArgs[0]);
  assertEquals(contentSize, Long.parseLong(outputArgs[4]));
  assertEquals(testFile.getCanonicalPath(), outputArgs[8]);

  testFile.delete();
  assertFalse(testFile.exists());
}
APIUtilityVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
 * {@code winutils symlink} must refuse a link path spelled with forward
 * slashes; the command is expected to fail with an IOException.
 */
@Test(timeout = 30000)
public void testSymlinkRejectsForwardSlashesInLink() throws IOException {
  File newFile = new File(TEST_DIR, "file");
  assertTrue(newFile.createNewFile());
  String target = newFile.getPath();
  // The *link* (not the target) uses '/' separators here.
  String link = new File(TEST_DIR, "link").getPath().replaceAll("\\\\", "/");
  try {
    Shell.execCommand(Shell.WINUTILS, "symlink", link, target);
    fail(String.format("did not receive expected failure creating symlink " + "with forward slashes in link: link = %s, target = %s", link, target));
  } catch (IOException e) {
    // BUGFIX: this log line previously said "target" although this test
    // exercises forward slashes in the link path.
    LOG.info("Expected: Failed to create symlink with forward slashes in link");
  }
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Validate behavior of chmod commands on directories on Windows: listing
 * requires read permission, file creation and rename require write
 * permission, while deleting a child succeeds regardless (Windows quirk).
 */
@Test(timeout = 30000)
public void testBasicChmodOnDir() throws IOException {
  File a = new File(TEST_DIR, "a");
  File b = new File(a, "b");
  a.mkdirs();
  assertTrue(b.createNewFile());

  // -wx (300): listing should fail without read permission.
  chmod("300", a);
  String[] files = a.list();
  assertTrue("Listing a directory without read permission should fail", null == files);

  // rwx (700): listing works again.
  chmod("700", a);
  files = a.list();
  assertEquals("b", files[0]);

  // r-x (500): no write permission, so creating a new file must fail.
  chmod("500", a);
  File c = new File(a, "c");
  try {
    c.createNewFile();
    // BUGFIX: was assertFalse("writeFile should have failed!", true) —
    // fail() states the intent directly.
    fail("writeFile should have failed!");
  } catch (IOException ex) {
    // BUGFIX: message previously said "577" but the mode set above is 500.
    LOG.info("Expected: Failed to create a file when directory " + "permissions are 500");
  }
  assertTrue("Special behavior: deleting a file will succeed on Windows " + "even if a user does not have write permissions on the parent dir", b.delete());
  assertFalse("Renaming a file should fail on the dir where a user does " + "not have write permissions", b.renameTo(new File(a, "d")));

  // rwx (700): create and rename now succeed.
  chmod("700", a);
  assertTrue(c.createNewFile());
  File d = new File(a, "d");
  assertTrue(c.renameTo(d));

  // rw- (600): listing and deleting still work.
  chmod("600", a);
  files = a.list();
  assertEquals("d", files[0]);
  assertTrue(d.delete());
  File e = new File(a, "e");
  assertTrue(e.createNewFile());
  assertTrue(e.renameTo(new File(a, "f")));

  // Restore a permissive mode so test cleanup can remove the directory.
  chmod("700", a);
}
APIUtilityVerifierUtilityVerifierBooleanVerifierHybridVerifier
/**
 * {@code winutils symlink} must refuse a target path spelled with forward
 * slashes; the command is expected to fail with an IOException.
 */
@Test(timeout = 30000)
public void testSymlinkRejectsForwardSlashesInTarget() throws IOException {
  File targetFile = new File(TEST_DIR, "file");
  assertTrue(targetFile.createNewFile());
  // Build a target that uses '/' separators; the link path stays native.
  String target = targetFile.getPath().replaceAll("\\\\", "/");
  String link = new File(TEST_DIR, "link").getPath();
  try {
    Shell.execCommand(Shell.WINUTILS, "symlink", link, target);
    fail(String.format("did not receive expected failure creating symlink " + "with forward slashes in target: link = %s, target = %s", link, target));
  } catch (IOException expected) {
    LOG.info("Expected: Failed to create symlink with forward slashes in target");
  }
}
APIUtilityVerifierUtilityVerifierBooleanVerifierConditionMatcherHybridVerifier
/**
 * Exercises {@code winutils readlink}: resolving directory and file
 * symlinks, plus the exit code 1 failure for empty, missing and
 * non-symlink arguments and for a wrong parameter count.
 */
@Test(timeout = 30000)
public void testReadLink() throws IOException {
  File dir1 = new File(TEST_DIR, "dir1");
  assertTrue(dir1.mkdirs());
  File file1 = new File(dir1, "file1.txt");
  assertTrue(file1.createNewFile());

  File dirLink = new File(TEST_DIR, "dlink");
  File fileLink = new File(TEST_DIR, "flink");
  Shell.execCommand(Shell.WINUTILS, "symlink", dirLink.toString(), dir1.toString());
  Shell.execCommand(Shell.WINUTILS, "symlink", fileLink.toString(), file1.toString());

  // Both link flavors must resolve back to their original targets.
  assertThat(Shell.execCommand(Shell.WINUTILS, "readlink", dirLink.toString()),
      equalTo(dir1.toString()));
  assertThat(Shell.execCommand(Shell.WINUTILS, "readlink", fileLink.toString()),
      equalTo(file1.toString()));

  expectReadLinkExitOne(
      "Failed to get Shell.ExitCodeException when reading bad symlink", "");
  expectReadLinkExitOne(
      "Failed to get Shell.ExitCodeException when reading bad symlink",
      "ThereIsNoSuchLink");
  expectReadLinkExitOne(
      "Failed to get Shell.ExitCodeException when reading bad symlink",
      dir1.toString());
  expectReadLinkExitOne(
      "Failed to get Shell.ExitCodeException when reading bad symlink",
      file1.toString());
  expectReadLinkExitOne(
      "Failed to get Shell.ExitCodeException with bad parameters", "a", "b");
}

/**
 * Runs {@code winutils readlink} with the given arguments and asserts that
 * it fails with exit code 1; otherwise fails the test with failMessage.
 */
private void expectReadLinkExitOne(String failMessage, String... readlinkArgs)
    throws IOException {
  String[] cmd = new String[readlinkArgs.length + 2];
  cmd[0] = Shell.WINUTILS;
  cmd[1] = "readlink";
  System.arraycopy(readlinkArgs, 0, cmd, 2, readlinkArgs.length);
  try {
    Shell.execCommand(cmd);
    fail(failMessage);
  } catch (Shell.ExitCodeException ece) {
    assertThat(ece.getExitCode(), is(1));
  }
}
TestInitializerAssumptionSetterHybridVerifier
// Windows-only suite: skip every test on other platforms, then ensure the
// scratch directory exists. The assumeTrue must run first so non-Windows
// machines never touch the filesystem.
@Before public void setUp(){
assumeTrue(Shell.WINDOWS);
TEST_DIR.mkdirs();
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Round-trips a CancelDelegationTokenRequestPBImpl through its protobuf
 * form and verifies the delegation token survives the conversion intact.
 */
@Test
public void testCancelDelegationTokenRequestPBImpl() {
  final Token token = getDelegationToken();
  final CancelDelegationTokenRequestPBImpl request =
      new CancelDelegationTokenRequestPBImpl();
  request.setDelegationToken(token);
  // Convert to the proto representation and back again.
  final CancelDelegationTokenRequestPBImpl roundTripped =
      new CancelDelegationTokenRequestPBImpl(request.getProto());
  assertNotNull(roundTripped.getDelegationToken());
  assertEquals(token, roundTripped.getDelegationToken());
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Round-trips a RenewDelegationTokenRequestPBImpl through its protobuf
 * form and verifies the delegation token survives the conversion intact.
 */
@Test
public void testRenewDelegationTokenRequestPBImpl() {
  final Token token = getDelegationToken();
  final RenewDelegationTokenRequestPBImpl request =
      new RenewDelegationTokenRequestPBImpl();
  request.setDelegationToken(token);
  // Convert to the proto representation and back again.
  final RenewDelegationTokenRequestPBImpl roundTripped =
      new RenewDelegationTokenRequestPBImpl(request.getProto());
  assertNotNull(roundTripped.getDelegationToken());
  assertEquals(token, roundTripped.getDelegationToken());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Simple test of ResourceRequest: hashCode, equals and compareTo must agree
 * for identical requests and must all diverge once the container count is
 * changed on one copy.
 */
@Test
public void testResourceRequest() {
  Resource resource = recordFactory.newRecordInstance(Resource.class);
  Priority priority = recordFactory.newRecordInstance(Priority.class);
  ResourceRequest original = ResourceRequest.newInstance(priority, "localhost", resource, 2);
  ResourceRequest copy = ResourceRequest.newInstance(priority, "localhost", resource, 2);

  // Identical requests: equals, compareTo == 0 and matching hash codes.
  assertTrue(original.equals(copy));
  assertEquals(0, original.compareTo(copy));
  assertEquals(original.hashCode(), copy.hashCode());

  copy.setNumContainers(1);

  assertFalse(original.equals(copy));
  // BUGFIX: assertNotSame(0, compareTo(...)) compared autoboxed Integer
  // *references* and only worked by accident of the Integer cache; assert
  // the primitive value directly instead.
  assertTrue(original.compareTo(copy) != 0);
  assertFalse(original.hashCode() == copy.hashCode());
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * An INTERNAL_SERVER_ERROR with no usable body from the timeline server
 * must surface as a YarnException explaining the missing response.
 */
@Test
public void testPostEntitiesNoResponse() throws Exception {
  mockClientResponse(client, ClientResponse.Status.INTERNAL_SERVER_ERROR, false, false);
  try {
    client.putEntities(generateEntity());
    Assert.fail("Exception is expected");
  } catch (YarnException expected) {
    Assert.assertTrue(expected.getMessage().contains(
        "Failed to get the response from the timeline server."));
  }
}
APIUtilityVerifierInternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * The RM web-app URL must be derived from RM_WEBAPP_ADDRESS, not from
 * RM_ADDRESS: verify the resolved port and that the RM address host did not
 * leak into the URL.
 */
@Test
public void testRMWebUrlSpecified() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "fortesting:24543");
  // Set the (different) RM address too; it must not influence the web URL.
  conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
  String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf);
  String[] parts = rmWebUrl.split(":");
  // Typo fix in the failure message: "incrrect" -> "incorrect".
  Assert.assertEquals("RM Web URL Port is incorrect", 24543,
      Integer.parseInt(parts[parts.length - 1]));
  // BUGFIX: assertNotSame compares references, so two distinct String
  // objects always "pass"; compare by value instead.
  Assert.assertFalse("RM Web Url not resolved correctly. Should not be rmtesting",
      "http://rmtesting:24543".equals(rmWebUrl));
}
Class: org.apache.hadoop.yarn.lib.TestZKClient
TestCleanerAPIUtilityVerifierBranchVerifierBooleanVerifierHybridVerifier
/**
 * Shuts down the embedded ZooKeeper server after each test: stop the
 * connection factory, close the transaction database (best-effort), then
 * block until the server port actually stops answering.
 */
@After public void tearDown() throws IOException, InterruptedException {
if (zks != null) {
ZKDatabase zkDb=zks.getZKDatabase();
factory.shutdown();
try {
zkDb.close();
}
catch ( IOException ie) {
// Intentionally ignored: a failed close must not mask the test result.
}
final int PORT=Integer.parseInt(hostPort.split(":")[1]);
// Wait until the port is released so the next test can rebind it.
Assert.assertTrue("waiting for server down",waitForServerDown("127.0.0.1:" + PORT,CONNECTION_TIMEOUT));
}
}
APIUtilityVerifierTestInitializerBooleanVerifierHybridVerifier
/**
 * Boots an embedded standalone ZooKeeper server for each test: small txn-log
 * preallocation, fresh temp data dir, a (reused) NIO connection factory on
 * the port parsed from hostPort, then wait until the server answers.
 */
@Before public void setUp() throws IOException, InterruptedException {
// Keep the transaction log preallocation tiny for fast test startup.
System.setProperty("zookeeper.preAllocSize","100");
FileTxnLog.setPreallocSize(100 * 1024);
if (!BASETEST.exists()) {
BASETEST.mkdirs();
}
File dataDir=createTmpDir(BASETEST);
zks=new ZooKeeperServer(dataDir,dataDir,3000);
final int PORT=Integer.parseInt(hostPort.split(":")[1]);
// The factory is created once and reused across tests of this class.
if (factory == null) {
factory=new NIOServerCnxnFactory();
factory.configure(new InetSocketAddress(PORT),maxCnxns);
}
factory.startup(zks);
// Block until the server is reachable before letting the test run.
Assert.assertTrue("waiting for server up",waitForServerUp("127.0.0.1:" + PORT,CONNECTION_TIMEOUT));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * Launches a long-running "sleep 100" container on a daemon thread, polls
 * the executor for its pid, sends SIGTERM, and verifies the runner thread
 * exits once the process dies.
 */
@Test public void testContainerKill() throws Exception {
// Environment-gated (e.g. requires the native container executor).
if (!shouldRun()) {
return;
}
final ContainerId sleepId=getNextContainerId();
// runAndBlock only returns when the container process exits, so it runs
// on its own thread; daemon so a failure cannot hang the JVM.
Thread t=new Thread(){
public void run(){
try {
runAndBlock(sleepId,"sleep","100");
}
catch ( IOException e) {
LOG.warn("Caught exception while running sleep",e);
}
}
}
;
t.setDaemon(true);
t.start();
assertTrue(t.isAlive());
// Poll (up to 10 x 200ms) until the executor can report the container pid.
String pid=null;
int count=10;
while ((pid=exec.getProcessId(sleepId)) == null && count > 0) {
LOG.info("Sleeping for 200 ms before checking for pid ");
Thread.sleep(200);
count--;
}
assertNotNull(pid);
LOG.info("Going to killing the process.");
exec.signalContainer(appSubmitter,pid,Signal.TERM);
LOG.info("sleeping for 100ms to let the sleep be killed");
Thread.sleep(100);
// TERM should have ended the sleep, unblocking runAndBlock and the thread.
assertFalse(t.isAlive());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * When the RM answers the heartbeat with a SHUTDOWN action, the NodeManager
 * must mark itself decommissioned and then stop without an explicit stop().
 * (Method name keeps its historical "Decommision" spelling for callers.)
 */
@Test
public void testNodeDecommision() throws Exception {
  nm = getNodeManager(NodeAction.SHUTDOWN);
  YarnConfiguration conf = createNMConfig();
  nm.init(conf);
  Assert.assertEquals(STATE.INITED, nm.getServiceState());
  nm.start();

  // Wait (up to 200 x 500ms) for at least one heartbeat to be processed.
  for (int attempt = 0; heartBeatID < 1 && attempt < 200; attempt++) {
    Thread.sleep(500);
  }
  Assert.assertFalse(heartBeatID < 1);
  Assert.assertTrue(nm.getNMContext().getDecommissioned());

  // The SHUTDOWN action should drive the NM to STOPPED on its own.
  for (int attempt = 0; nm.getServiceState() != STATE.STOPPED && attempt < 20; attempt++) {
    LOG.info("Waiting for NM to stop..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(STATE.STOPPED, nm.getServiceState());
}
UtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierHybridVerifier
/**
 * Verifies NM->RM connection retry behavior. Phase 1: with an updater that
 * never lets the RM come up, nm.start() must fail only after retrying for
 * roughly RESOURCEMANAGER_CONNECT_MAX_WAIT_MS. Phase 2: with an updater
 * whose RM appears after rmStartIntervalMS, nm.start() must succeed, and
 * the elapsed time must bracket that start interval.
 */
@Test(timeout=150000) public void testNMConnectionToRM() throws Exception {
// delta is the slack allowed on top of each expected wait window.
final long delta=50000;
final long connectionWaitMs=5000;
final long connectionRetryIntervalMs=1000;
final long rmStartIntervalMS=2 * 1000;
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,connectionWaitMs);
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,connectionRetryIntervalMs);
NodeManagerWithCustomNodeStatusUpdater nmWithUpdater;
// Phase 1 updater: "true" => the simulated RM never becomes reachable.
nm=nmWithUpdater=new NodeManagerWithCustomNodeStatusUpdater(){
@Override protected NodeStatusUpdater createUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
NodeStatusUpdater nodeStatusUpdater=new MyNodeStatusUpdater4(context,dispatcher,healthChecker,metrics,rmStartIntervalMS,true);
return nodeStatusUpdater;
}
}
;
nm.init(conf);
long waitStartTime=System.currentTimeMillis();
try {
nm.start();
Assert.fail("NM should have failed to start due to RM connect failure");
}
catch ( Exception e) {
// The failure must come no earlier than the configured max wait and no
// later than max wait + delta, proving the NM actually retried.
long t=System.currentTimeMillis();
long duration=t - waitStartTime;
boolean waitTimeValid=(duration >= connectionWaitMs) && (duration < (connectionWaitMs + delta));
if (!waitTimeValid) {
throw new Exception("NM should have tried re-connecting to RM during " + "period of at least " + connectionWaitMs + " ms, but "+ "stopped retrying within "+ (connectionWaitMs + delta)+ " ms: "+ e,e);
}
}
// Phase 2 updater: "false" => the simulated RM comes up after
// rmStartIntervalMS, so start() should eventually succeed.
nm=nmWithUpdater=new NodeManagerWithCustomNodeStatusUpdater(){
@Override protected NodeStatusUpdater createUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
NodeStatusUpdater nodeStatusUpdater=new MyNodeStatusUpdater4(context,dispatcher,healthChecker,metrics,rmStartIntervalMS,false);
return nodeStatusUpdater;
}
}
;
nm.init(conf);
NodeStatusUpdater updater=nmWithUpdater.getUpdater();
Assert.assertNotNull("Updater not yet created ",updater);
waitStartTime=System.currentTimeMillis();
try {
nm.start();
}
catch ( Exception ex) {
LOG.error("NM should have started successfully " + "after connecting to RM.",ex);
throw ex;
}
long duration=System.currentTimeMillis() - waitStartTime;
MyNodeStatusUpdater4 myUpdater=(MyNodeStatusUpdater4)updater;
Assert.assertTrue("NM started before updater triggered",myUpdater.isTriggered());
Assert.assertTrue("NM should have connected to RM after " + "the start interval of " + rmStartIntervalMS + ": actual "+ duration+ " "+ myUpdater,(duration >= rmStartIntervalMS));
Assert.assertTrue("NM should have connected to RM less than " + (rmStartIntervalMS + delta) + " milliseconds of RM starting up: actual "+ duration+ " "+ myUpdater,(duration < (rmStartIntervalMS + delta)));
}
IterativeVerifierBranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Starts a NodeManager (with a mock status updater) on a background thread,
 * waits for it to leave INITED, then for several heartbeats, and finally
 * checks exactly one node registered with the mock RM.
 */
@Test public void testNMRegistration() throws InterruptedException {
nm=new NodeManager(){
@Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
return new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics);
}
}
;
YarnConfiguration conf=createNMConfig();
nm.init(conf);
// The status updater must be the last composed service so it starts last.
Object[] services=nm.getServices().toArray();
Object lastService=services[services.length - 1];
Assert.assertTrue("last service is NOT the node status updater",lastService instanceof NodeStatusUpdater);
// start() blocks, so run it on a helper thread; capture any startup error
// in nmStartError for the polling loop below to surface.
new Thread(){
public void run(){
try {
nm.start();
}
catch ( Throwable e) {
TestNodeStatusUpdater.this.nmStartError=e;
throw new YarnRuntimeException(e);
}
}
}
.start();
System.out.println(" ----- thread already started.." + nm.getServiceState());
// Poll (up to 50 x 2s) for the NM to leave INITED, failing fast on error.
int waitCount=0;
while (nm.getServiceState() == STATE.INITED && waitCount++ != 50) {
LOG.info("Waiting for NM to start..");
if (nmStartError != null) {
LOG.error("Error during startup. ",nmStartError);
Assert.fail(nmStartError.getCause().getMessage());
}
Thread.sleep(2000);
}
if (nm.getServiceState() != STATE.STARTED) {
Assert.fail("NodeManager failed to start");
}
// Wait for more than three heartbeats to prove the updater keeps running.
waitCount=0;
while (heartBeatID <= 3 && waitCount++ != 200) {
Thread.sleep(1000);
}
Assert.assertFalse(heartBeatID <= 3);
Assert.assertEquals("Number of registered NMs is wrong!!",1,this.registeredNodes.size());
nm.stop();
}
InternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * With log aggregation enabled, heartbeats must carry keep-alive requests
 * for a finished application until it is removed from the NM context, and
 * must stop carrying them afterwards.
 */
@Test public void testApplicationKeepAlive() throws Exception {
MyNodeManager nm=new MyNodeManager();
try {
YarnConfiguration conf=createNMConfig();
conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,true);
// Short expiry so keep-alives are sent within a few heartbeats.
conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,4000l);
nm.init(conf);
nm.start();
// Let roughly 12 heartbeats happen while the app is still tracked.
while (heartBeatID < 12) {
Thread.sleep(1000l);
}
MyResourceTracker3 rt=(MyResourceTracker3)nm.getNodeStatusUpdater().getRMClient();
// Drop the app from the context: no further keep-alives should be sent.
rt.context.getApplications().remove(rt.appId);
Assert.assertEquals(1,rt.keepAliveRequests.size());
int numKeepAliveRequests=rt.keepAliveRequests.get(rt.appId).size();
LOG.info("Number of Keep Alive Requests: [" + numKeepAliveRequests + "]");
// Timing-dependent window: 2 or 3 keep-alives are both acceptable.
Assert.assertTrue(numKeepAliveRequests == 2 || numKeepAliveRequests == 3);
while (heartBeatID < 20) {
Thread.sleep(1000l);
}
// After removal, the count must not have grown.
int numKeepAliveRequests2=rt.keepAliveRequests.get(rt.appId).size();
Assert.assertEquals(numKeepAliveRequests,numKeepAliveRequests2);
}
finally {
if (nm.getServiceState() == STATE.STARTED) nm.stop();
}
}
BranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Configures two auxiliary services (ServiceA / ServiceB) and walks them
 * through init -> start -> stop, checking every service's state each step.
 */
@Test
public void testAuxServices() {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
      ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
      ServiceB.class, Service.class);
  final AuxServices aux = new AuxServices();
  aux.init(conf);

  // Prime-factor trick: multiply by 2 per ServiceA and 3 per ServiceB, so
  // exactly one of each yields 6 regardless of iteration order.
  int latch = 1;
  for (Service s : aux.getServices()) {
    assertEquals(INITED, s.getServiceState());
    if (s instanceof ServiceA) {
      latch *= 2;
    } else if (s instanceof ServiceB) {
      latch *= 3;
    } else {
      fail("Unexpected service type " + s.getClass());
    }
  }
  assertEquals("Invalid mix of services", 6, latch);

  aux.start();
  for (Service s : aux.getServices()) {
    assertEquals(STARTED, s.getServiceState());
  }

  aux.stop();
  for (Service s : aux.getServices()) {
    assertEquals(STOPPED, s.getServiceState());
  }
}
BranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Same init/start/stop walk as testAuxServices, additionally checking that
 * each started service publishes its metadata buffer ("A" for Asrv, "B" for
 * Bsrv) via AuxServices#getMetaData().
 */
@Test
public void testAuxServicesMeta() {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
      ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
      ServiceB.class, Service.class);
  final AuxServices aux = new AuxServices();
  aux.init(conf);

  // Prime-factor trick: one ServiceA (x2) and one ServiceB (x3) gives 6.
  int latch = 1;
  for (Service s : aux.getServices()) {
    assertEquals(INITED, s.getServiceState());
    if (s instanceof ServiceA) {
      latch *= 2;
    } else if (s instanceof ServiceB) {
      latch *= 3;
    } else {
      fail("Unexpected service type " + s.getClass());
    }
  }
  assertEquals("Invalid mix of services", 6, latch);

  aux.start();
  for (Service s : aux.getServices()) {
    assertEquals(STARTED, s.getServiceState());
  }

  // BUGFIX: the previous raw-typed local (`Map meta`) made
  // `meta.get(...).array()` a compile error (Object has no array()); calling
  // through getMetaData() directly keeps the declared generic return type.
  // NOTE(review): new String(byte[]) uses the platform charset — presumably
  // the metadata is ASCII; confirm if non-ASCII metadata is ever published.
  assertEquals(2, aux.getMetaData().size());
  assertEquals("A", new String(aux.getMetaData().get("Asrv").array()));
  assertEquals("B", new String(aux.getMetaData().get("Bsrv").array()));

  aux.stop();
  for (Service s : aux.getServices()) {
    assertEquals(STOPPED, s.getServiceState());
  }
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * Aux-service names may contain [a-zA-Z0-9_] and must not start with a
 * digit: "Asrv1"/"Bsrv_2" must initialize fine, "1Asrv1" must be rejected
 * with an explanatory message.
 */
@Test
public void testValidAuxServiceName() {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "Asrv1", "Bsrv_2" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv1"),
      ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv_2"),
      ServiceB.class, Service.class);
  final AuxServices validServices = new AuxServices();
  try {
    validServices.init(conf);
  } catch (Exception ex) {
    Assert.fail("Should not receive the exception.");
  }

  // A name with a leading digit must be rejected during init.
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "1Asrv1" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "1Asrv1"),
      ServiceA.class, Service.class);
  final AuxServices invalidServices = new AuxServices();
  try {
    invalidServices.init(conf);
    Assert.fail("Should receive the exception.");
  } catch (Exception ex) {
    assertTrue(ex.getMessage().contains("The ServiceName: 1Asrv1 set in " + "yarn.nodemanager.aux-services is invalid.The valid service name " + "should only contain a-zA-Z0-9_ and can not start with numbers"));
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises LocalCacheDirectoryManager's per-directory file accounting:
 * increments/decrements against the root path and against deep subpaths,
 * checking which relative path is handed out for the next localization.
 *
 * NOTE(review): the expected sequence assumes the effective per-directory
 * limit derived from NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY here is 2
 * (config value minus the per-level subdirectory reservation) — confirm
 * against LocalCacheDirectoryManager before relying on the exact ordering.
 */
@Test public void testIncrementFileCountForPath(){
YarnConfiguration conf=new YarnConfiguration();
// Smallest useful cap: DIRECTORIES_PER_LEVEL is reserved for subdirs,
// leaving (presumably) room for 2 files per directory.
conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,LocalCacheDirectoryManager.DIRECTORIES_PER_LEVEL + 2);
LocalCacheDirectoryManager mgr=new LocalCacheDirectoryManager(conf);
final String rootPath="";
mgr.incrementFileCountForPath(rootPath);
// Root still has capacity for one more file here...
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
// ...but after that allocation it is full, so a subdirectory is returned.
Assert.assertFalse("root dir should be full",rootPath.equals(mgr.getRelativePathForLocalization()));
mgr.getRelativePathForLocalization();
// Two decrements free both root slots again.
mgr.decrementFileCountForPath(rootPath);
mgr.decrementFileCountForPath(rootPath);
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
String otherDir=mgr.getRelativePathForLocalization();
Assert.assertFalse("root dir should be full",otherDir.equals(rootPath));
// Incrementing a deep path makes the manager track that hierarchy; the
// next allocations walk through the not-yet-full known directories.
final String deepDir0="d/e/e/p/0";
final String deepDir1="d/e/e/p/1";
final String deepDir2="d/e/e/p/2";
final String deepDir3="d/e/e/p/3";
mgr.incrementFileCountForPath(deepDir0);
Assert.assertEquals(otherDir,mgr.getRelativePathForLocalization());
Assert.assertEquals(deepDir0,mgr.getRelativePathForLocalization());
Assert.assertEquals("total dir count incorrect after increment",deepDir1,mgr.getRelativePathForLocalization());
mgr.incrementFileCountForPath(deepDir2);
mgr.incrementFileCountForPath(deepDir1);
mgr.incrementFileCountForPath(deepDir2);
Assert.assertEquals(deepDir3,mgr.getRelativePathForLocalization());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Submitting an application with an already-registered ApplicationId must be
 * rejected with a "Cannot add a duplicate!" YarnException, and the original
 * (FINISHED) application must remain registered and untouched.
 */
@Test(timeout = 30000)
public void testRMAppSubmitDuplicateApplicationId() throws Exception {
  ApplicationId appId = MockApps.newAppID(0);
  asContext.setApplicationId(appId);
  RMApp appOrig = rmContext.getRMApps().get(appId);
  // BUGFIX: was `"testApp1" != appOrig.getName()` — a String *reference*
  // comparison that is true for any distinct object; compare by value.
  Assert.assertTrue("app name matches but shouldn't",
      !"testApp1".equals(appOrig.getName()));
  try {
    appMonitor.submitApplication(asContext, "test");
    Assert.fail("Exception is expected when applicationId is duplicate.");
  } catch (YarnException e) {
    // Typo fix in the failure message: "expectd" -> "expected".
    Assert.assertTrue("The thrown exception is not the expected one.",
        e.getMessage().contains("Cannot add a duplicate!"));
  }
  // The pre-existing application must still be present and FINISHED.
  RMApp app = rmContext.getRMApps().get(appId);
  Assert.assertNotNull("app is null", app);
  Assert.assertEquals("app id doesn't match", appId, app.getApplicationId());
  Assert.assertEquals("app state doesn't match", RMAppState.FINISHED, app.getState());
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * A resource request above the scheduler's maximum allocation must make the
 * submission fail with a YarnException describing the invalid request.
 */
@Test(timeout = 30000)
public void testRMAppSubmitInvalidResourceRequest() throws Exception {
  // One MB more than the configured maximum allocation.
  asContext.setResource(Resources.createResource(
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1));
  try {
    appMonitor.submitApplication(asContext, "test");
    Assert.fail("Application submission should fail because resource" + " request is invalid.");
  } catch (YarnException e) {
    Assert.assertTrue("The thrown exception is not" + " InvalidResourceRequestException",
        e.getMessage().contains("Invalid resource request"));
  }
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Submits an application and verifies it is registered as NEW and that a
 * START event is eventually dispatched for it.
 */
@Test public void testRMAppSubmit() throws Exception {
appMonitor.submitApplication(asContext,"test");
RMApp app=rmContext.getRMApps().get(appId);
Assert.assertNotNull("app is null",app);
Assert.assertEquals("app id doesn't match",appId,app.getApplicationId());
Assert.assertEquals("app state doesn't match",RMAppState.NEW,app.getState());
// Poll until the recorded event type changes away from KILL.
// NOTE(review): this presumes the test harness initializes the captured
// event type to KILL before dispatch — confirm against the fixture setup.
int timeoutSecs=0;
while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) {
Thread.sleep(1000);
}
Assert.assertEquals("app event type sent is wrong",RMAppEventType.START,getAppEventType());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Kills one managed and one unmanaged application and verifies the kill
 * acknowledgement semantics plus the KILLED-apps count reported by the RM:
 * managed AMs need repeated kill calls; unmanaged AMs acknowledge at once.
 */
@Test
public void testForceKillApplication() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  MockRM rm = new MockRM();
  rm.init(conf);
  rm.start();
  ClientRMService rmService = rm.getClientRMService();
  GetApplicationsRequest getRequest =
      GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.KILLED));

  RMApp app1 = rm.submitApp(1024);
  RMApp app2 = rm.submitApp(1024, true);

  assertEquals("Incorrect number of apps in the RM", 0,
      rmService.getApplications(getRequest).getApplicationList().size());

  KillApplicationRequest killRequest1 =
      KillApplicationRequest.newInstance(app1.getApplicationId());
  KillApplicationRequest killRequest2 =
      KillApplicationRequest.newInstance(app2.getApplicationId());

  // A managed AM is killed asynchronously: retry until the RM reports the
  // kill as complete (bounded at 100 x 10ms).
  int killAttemptCount = 0;
  for (int i = 0; i < 100; i++) {
    KillApplicationResponse killResponse1 = rmService.forceKillApplication(killRequest1);
    killAttemptCount++;
    if (killResponse1.getIsKillCompleted()) {
      break;
    }
    Thread.sleep(10);
  }
  assertTrue("Kill attempt count should be greater than 1 for managed AMs",
      killAttemptCount > 1);
  assertEquals("Incorrect number of apps in the RM", 1,
      rmService.getApplications(getRequest).getApplicationList().size());

  // An unmanaged AM acknowledges the kill on the first call.
  KillApplicationResponse killResponse2 = rmService.forceKillApplication(killRequest2);
  assertTrue("Killing UnmanagedAM should falsely acknowledge true",
      killResponse2.getIsKillCompleted());
  for (int i = 0; i < 100; i++) {
    if (2 == rmService.getApplications(getRequest).getApplicationList().size()) {
      break;
    }
    Thread.sleep(10);
  }
  assertEquals("Incorrect number of apps in the RM", 2,
      rmService.getApplications(getRequest).getApplicationList().size());
}
UtilityVerifierBooleanVerifierHybridVerifier
/**
 * FifoScheduler initialization must reject a configuration whose minimum
 * memory allocation exceeds the maximum.
 */
@Test(timeout = 30000)
public void testConfValidation() throws Exception {
  Configuration conf = new YarnConfiguration();
  // Deliberately inverted: min (2048) > max (1024).
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
  FifoScheduler scheduler = new FifoScheduler();
  try {
    scheduler.serviceInit(conf);
    fail("Exception is expected because the min memory allocation is" + " larger than the max memory allocation.");
  } catch (YarnRuntimeException e) {
    assertTrue("The thrown exception is not the expected one.",
        e.getMessage().startsWith("Invalid resource scheduler memory"));
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that repeated HA transitions (standby <-> active) neither leak
 * event handlers on the dispatcher nor change the number of composed
 * services, and that each transition swaps in a fresh (running) dispatcher
 * while stopping the previous one.
 */
@Test public void testRMDispatcherForHA() throws IOException {
String errorMessageForEventHandler="Expect to get the same number of handlers";
String errorMessageForService="Expect to get the same number of services";
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
Configuration conf=new YarnConfiguration(configuration);
// Counting dispatcher lets the test observe handler registrations.
rm=new MockRM(conf){
@Override protected Dispatcher createDispatcher(){
return new MyCountingDispatcher();
}
}
;
rm.init(conf);
// Baseline counts taken right after init, before any transition.
int expectedEventHandlerCount=((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount();
int expectedServiceCount=rm.getServices().size();
assertTrue(expectedEventHandlerCount != 0);
StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive());
rm.start();
// Bounce through several transitions; counts must stay stable throughout.
rm.adminService.transitionToStandby(requestInfo);
rm.adminService.transitionToActive(requestInfo);
rm.adminService.transitionToStandby(requestInfo);
rm.adminService.transitionToActive(requestInfo);
rm.adminService.transitionToStandby(requestInfo);
// The dispatcher installed while standby must still be running.
MyCountingDispatcher dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher();
assertTrue(!dispatcher.isStopped());
rm.adminService.transitionToActive(requestInfo);
assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount());
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size());
// Capture the active dispatcher; transitioning to standby must stop it
// and replace it with a new one (same handler/service counts).
dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher();
rm.adminService.transitionToStandby(requestInfo);
assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount());
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size());
assertTrue(dispatcher.isStopped());
rm.stop();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test to verify the following RM HA transitions to the following states.
 * 1. Standby: Should be a no-op
 * 2. Active: Active services should start
 * 3. Active: Should be a no-op.
 * While active, submit a couple of jobs
 * 4. Standby: Active services should stop
 * 5. Active: Active services should start
 * 6. Stop the RM: All services should stop and RM should not be ready to
 * become Active
 *
 * Cluster metrics are checked after every step: all-zero while standby,
 * and counting one app/node/allocation per activation while active.
 */
@Test(timeout=30000) public void testFailoverAndTransitions() throws Exception {
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
Configuration conf=new YarnConfiguration(configuration);
rm=new MockRM(conf);
rm.init(conf);
StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// Before start(): INITIALIZING and not ready to become active.
assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive());
checkMonitorHealth();
rm.start();
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// Step 1: standby -> standby is a no-op; metrics stay zero.
rm.adminService.transitionToStandby(requestInfo);
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// Step 2: first activation starts active services; one app/node appears.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,1,1,1,2048,1);
// Step 3: active -> active is a no-op for services, but the functionality
// check submits again, so cumulative app/attempt counts grow.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,2,2,2,2048,2);
// Step 4: back to standby stops active services; metrics reset to zero.
rm.adminService.transitionToStandby(requestInfo);
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// Step 5: reactivation starts active services afresh.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,1,1,1,2048,1);
// Step 6: stopping the RM stops everything and bars future activation.
rm.stop();
assertEquals(STATE_ERR,HAServiceState.STOPPING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active even after it is stopped",rm.adminService.getServiceStatus().isReadyToBecomeActive());
assertFalse("Active RM services are started",rm.areActiveServicesRunning());
checkMonitorHealth();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Moving an app between sibling leaf queues (a1 -> a2) under the same
// parent must update leaf-queue membership while leaving the parent ("a")
// and "root" memberships unchanged.
@Test public void testMoveAppSameParent() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Before the move: the attempt is visible in a1, its parent a, and root.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("a1"));
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
// The destination sibling queue starts empty.
List appsInA2=scheduler.getAppsInQueue("a2");
assertTrue(appsInA2.isEmpty());
scheduler.moveApplication(app.getApplicationId(),"a2");
// After the move: a2 owns the attempt, a1 is empty, and membership in the
// shared ancestors (a, root) is unchanged.
appsInA2=scheduler.getAppsInQueue("a2");
assertEquals(1,appsInA2.size());
queue=scheduler.getApplicationAttempt(appsInA2.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("a2"));
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
rm.stop();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// moveAllApps(src, dst) must relocate every app in "a1" to "b1", updating
// membership along both queue paths (a/a1 emptied, b/b1 populated) while
// root membership is preserved.
@Test public void testMoveAllApps() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Before the move: the single attempt lives under a1/a/root; the target
// path b1/b is empty.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("a1"));
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
scheduler.moveAllApps("a1","b1");
// NOTE(review): fixed sleep suggests moveAllApps completes asynchronously;
// a state-poll/wait would be less flaky than a hard 1s pause -- confirm
// whether a waitFor-style helper is available.
Thread.sleep(1000);
// After the move: b1/b own the attempt, a1/a are empty, root unchanged.
appsInB1=scheduler.getAppsInQueue("b1");
assertEquals(1,appsInB1.size());
queue=scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("b1"));
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.contains(appAttemptId));
assertEquals(1,appsInB.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// killAllAppsInQueue("a1") must drive the app to KILLED and remove the
// attempt from every level of the queue hierarchy (a1, a, root).
@Test public void testKillAllAppsInQueue() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Sanity: the attempt is registered in a1, its parent a, and root.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("a1"));
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
scheduler.killAllAppsInQueue("a1");
// Kill is asynchronous; block until the RMApp reaches KILLED before
// checking queue membership.
rm.waitForState(app.getApplicationId(),RMAppState.KILLED);
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.isEmpty());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Moving a single app across parent queues (a1 -> b1) must empty the whole
// source path (a1, a), populate the whole destination path (b1, b), and
// keep root membership intact.
@Test public void testMoveAppBasic() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Before the move: attempt is under a1/a/root; b1/b are empty.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("a1"));
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
scheduler.moveApplication(app.getApplicationId(),"b1");
// After the move: b1/b own the attempt, a1/a are empty, root unchanged.
appsInB1=scheduler.getAppsInQueue("b1");
assertEquals(1,appsInB1.size());
queue=scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("b1"));
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.contains(appAttemptId));
assertEquals(1,appsInB.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
UtilityVerifierBooleanVerifierHybridVerifier
// reinitialize() must reject configurations whose minimum allocation
// exceeds the maximum allocation, for both memory and vcores.
@Test(timeout=30000) public void testConfValidation() throws Exception {
ResourceScheduler candidate=new CapacityScheduler();
candidate.setRMContext(resourceManager.getRMContext());

// Case 1: minimum memory (2048 MB) greater than maximum memory (1024 MB).
Configuration badMemoryConf=new YarnConfiguration();
badMemoryConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,2048);
badMemoryConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,1024);
try {
candidate.reinitialize(badMemoryConf,mockContext);
fail("Exception is expected because the min memory allocation is larger than the max memory allocation.");
}
catch ( YarnRuntimeException expected) {
assertTrue("The thrown exception is not the expected one.",expected.getMessage().startsWith("Invalid resource scheduler memory"));
}

// Case 2: minimum vcores (2) greater than maximum vcores (1).
Configuration badVcoreConf=new YarnConfiguration();
badVcoreConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,2);
badVcoreConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,1);
try {
candidate.reinitialize(badVcoreConf,mockContext);
fail("Exception is expected because the min vcores allocation is larger than the max vcores allocation.");
}
catch ( YarnRuntimeException expected) {
assertTrue("The thrown exception is not the expected one.",expected.getMessage().startsWith("Invalid resource scheduler vcores"));
}
}
UtilityVerifierBooleanVerifierHybridVerifier
// serviceInit() must reject configurations whose minimum allocation
// exceeds the maximum allocation, for both memory and vcores.
@Test(timeout=30000) public void testConfValidation() throws Exception {
FairScheduler candidate=new FairScheduler();

// Case 1: minimum memory (2048 MB) greater than maximum memory (1024 MB).
Configuration badMemoryConf=new YarnConfiguration();
badMemoryConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,2048);
badMemoryConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,1024);
try {
candidate.serviceInit(badMemoryConf);
fail("Exception is expected because the min memory allocation is larger than the max memory allocation.");
}
catch ( YarnRuntimeException expected) {
assertTrue("The thrown exception is not the expected one.",expected.getMessage().startsWith("Invalid resource scheduler memory"));
}

// Case 2: minimum vcores (2) greater than maximum vcores (1).
Configuration badVcoreConf=new YarnConfiguration();
badVcoreConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,2);
badVcoreConf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,1);
try {
candidate.serviceInit(badVcoreConf);
fail("Exception is expected because the min vcores allocation is larger than the max vcores allocation.");
}
catch ( YarnRuntimeException expected) {
assertTrue("The thrown exception is not the expected one.",expected.getMessage().startsWith("Invalid resource scheduler vcores"));
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Exercises per-app node blacklisting through allocate(): adding the only
// node to the blacklist must prevent container allocation; removing it
// must allow allocation again.
@SuppressWarnings("resource") @Test public void testBlacklistNodes() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
final int GB=1024;
String host="127.0.0.1";
// Single 16GB/16-vcore node -- blacklisting it starves the app entirely.
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16 * GB,16),0,host);
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
ApplicationAttemptId appAttemptId=createSchedulingRequest(GB,"root.default","user",1);
FSAppAttempt app=scheduler.getSchedulerApp(appAttemptId);
// allocate(..., additions, removals): 4th arg adds to the blacklist,
// 5th arg removes from it.
scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null);
assertTrue(app.isBlacklisted(host));
scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host));
assertFalse(scheduler.getSchedulerApp(appAttemptId).isBlacklisted(host));
List update=Arrays.asList(createResourceRequest(GB,node.getHostName(),1,0,true));
// Re-blacklist while also submitting a real resource request: the node
// heartbeat must then allocate nothing.
scheduler.allocate(appAttemptId,update,Collections.emptyList(),Collections.singletonList(host),null);
assertTrue(app.isBlacklisted(host));
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Incorrect number of containers allocated",0,app.getLiveContainers().size());
// Un-blacklist: the next heartbeat must satisfy the pending request.
scheduler.allocate(appAttemptId,update,Collections.emptyList(),null,Collections.singletonList(host));
assertFalse(app.isBlacklisted(host));
createSchedulingRequest(GB,"root.default","user",1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Incorrect number of containers allocated",1,app.getLiveContainers().size());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// REST /ws/v1/cluster/apps filtering by repeated "states" query parameters:
// a single state returns only matching apps; two repeated state params are
// OR-ed together.
@Test public void testAppsQueryStates() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// One app stays ACCEPTED; the second is killed to create two distinct states.
rm.submitApp(CONTAINER_MB);
RMApp killedApp=rm.submitApp(CONTAINER_MB);
rm.killApp(killedApp.getApplicationId());
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
MultivaluedMapImpl params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
// Query 1: states=ACCEPTED -> exactly the one accepted app.
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state"));
r=resource();
params=new MultivaluedMapImpl();
// Query 2: states=ACCEPTED&states=KILLED (repeated param) -> both apps,
// in either order.
params.add("states",YarnApplicationState.ACCEPTED.toString());
params.add("states",YarnApplicationState.KILLED.toString());
response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || (array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
rm.stop();
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Requesting a nonexistent application id over REST must fail with a 404
// whose RemoteException body carries NotFoundException details.
@Test public void testNonexistApp() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB,"testwordcount","user1");
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
try {
// "application_00000_0099" is syntactically valid but never submitted.
r.path("ws").path("v1").path("cluster").path("apps").path("application_00000_0099").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid appid");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
// The JSON error body is a RemoteException with exactly message,
// exception and javaClassName fields.
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id: application_00000_0099 not found",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
finally {
rm.stop();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// REST /ws/v1/cluster/apps filtering by "applicationTypes": covers a single
// type, repeated params, comma-separated lists, empty values (no filter),
// and whitespace/empty-token tolerance in comma lists.
@Test public void testAppsQueryAppTypes() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// Presumably ensures distinct app submission timestamps -- TODO confirm.
Thread.sleep(1);
// App 1 (default type "YARN") is run through a full AM lifecycle.
RMApp app1=rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am.registerAppAttempt();
am.unregisterAppAttempt();
amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
// Apps 2 and 3 carry explicit application types.
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"NON-YARN");
WebResource r=resource();
// Query 1: single type -> only the MAPREDUCE app.
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("MAPREDUCE",array.getJSONObject(0).getString("applicationType"));
r=resource();
// Query 2: repeated query params are OR-ed -> YARN and MAPREDUCE apps.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
r=resource();
// Query 3: comma-separated list within one param value.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
r=resource();
// Query 4: empty value -> no type filter, all 3 apps returned.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
r=resource();
// Query 5: comma list plus repeated param covering all three types.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
r=resource();
// Query 6: an additional empty param value does not widen the filter.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
r=resource();
// Query 7: empty tokens and surrounding whitespace in a comma list are
// ignored; "YARN" still matches.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, YARN ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
r=resource();
// Query 8: a list of only empty/whitespace tokens -> no filter at all.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
r=resource();
// Query 9: mixed real and empty tokens in one comma list.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN, ,NON-YARN, ,,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
r=resource();
// Query 10: messy tokens across two repeated params still OR correctly.
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes"," YARN, , ,,,").queryParam("applicationTypes","MAPREDUCE , ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
rm.stop();
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Requesting the app-attempts sub-resource of a nonexistent application must
// fail with 404: the app lookup fails before attempts are resolved, so the
// error message still references the missing application id.
@Test public void testNonexistAppAttempts() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB,"testwordcount","user1");
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
try {
// BUG FIX: this test was a byte-for-byte copy of testNonexistApp and never
// requested the "appattempts" sub-resource its name promises to cover.
r.path("ws").path("v1").path("cluster").path("apps").path("application_00000_0099").path("appattempts").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid appid");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
// The JSON error body is a RemoteException with exactly message,
// exception and javaClassName fields.
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id: application_00000_0099 not found",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
finally {
rm.stop();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Drives an app through its maximum number of AM attempts by repeatedly
// failing the AM container, then checks the attempts REST resource reports
// all of them.
@Test(timeout=20000) public void testMultipleAppAttempts() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",8192);
RMApp app1=rm.submitApp(CONTAINER_MB,"testwordcount","user1");
MockAM am=MockRM.launchAndRegisterAM(app1,rm,amNodeManager);
int maxAppAttempts=rm.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
// The scenario only makes sense with retries available.
assertTrue(maxAppAttempts > 1);
int numAttempt=1;
while (true) {
// Report the AM container (container #1) as COMPLETE to fail the attempt.
amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FAILED);
if (numAttempt == maxAppAttempts) {
// Last allowed attempt failed: the whole app must go FAILED.
rm.waitForState(app1.getApplicationId(),RMAppState.FAILED);
break;
}
// Otherwise the RM re-accepts the app and launches a fresh attempt.
rm.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
am=MockRM.launchAndRegisterAM(app1,rm,amNodeManager);
numAttempt++;
}
assertEquals("incorrect number of attempts",maxAppAttempts,app1.getAppAttempts().values().size());
// Validate the attempts listing through the web services helper.
testAppAttemptsHelper(app1.getApplicationId().toString(),app1,MediaType.APPLICATION_JSON);
rm.stop();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Same scenario as testAppsQueryStates, but the two states are passed as a
// single comma-separated "states" value instead of repeated parameters.
@Test public void testAppsQueryStatesComma() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// One app stays ACCEPTED; the second is killed to create two states.
rm.submitApp(CONTAINER_MB);
RMApp killedApp=rm.submitApp(CONTAINER_MB);
rm.killApp(killedApp.getApplicationId());
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
MultivaluedMapImpl params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
// Query 1: states=ACCEPTED -> exactly the one accepted app.
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state"));
r=resource();
params=new MultivaluedMapImpl();
// Query 2: states=ACCEPTED,KILLED as one comma-separated value -> both
// apps, in either order.
params.add("states",YarnApplicationState.ACCEPTED.toString() + "," + YarnApplicationState.KILLED.toString());
response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || (array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
rm.stop();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Exercises the NM simulator: node registration, container add/cleanup, and
// AM-container bookkeeping. FIX: long literals used a lowercase 'l' suffix
// (100000l, -1l), which reads as the digit '1'; replaced with uppercase 'L'.
@Test public void testNMSimulator() throws Exception {
// Register one simulated NM (10 GB / 10 vcores) and drive one heartbeat step.
NMSimulator node1=new NMSimulator();
node1.init("rack1/node1",GB * 10,10,0,1000,rm);
node1.middleStep();
Assert.assertEquals(1,rm.getResourceScheduler().getNumClusterNodes());
Assert.assertEquals(GB * 10,rm.getResourceScheduler().getRootQueueMetrics().getAvailableMB());
Assert.assertEquals(10,rm.getResourceScheduler().getRootQueueMetrics().getAvailableVirtualCores());
// Add a regular container with a finite lifetime (ms).
ContainerId cId1=newContainerId(1,1,1);
Container container1=Container.newInstance(cId1,null,null,Resources.createResource(GB,1),null,null);
node1.addNewContainer(container1,100000L);
Assert.assertTrue("Node1 should have one running container.",node1.getRunningContainers().containsKey(cId1));
// A negative lifetime registers the container as an AM container
// (per the getAMContainers assertion below).
ContainerId cId2=newContainerId(2,1,1);
Container container2=Container.newInstance(cId2,null,null,Resources.createResource(GB,1),null,null);
node1.addNewContainer(container2,-1L);
Assert.assertTrue("Node1 should have one running AM container",node1.getAMContainers().contains(cId2));
// Cleanup moves the regular container into the completed set...
node1.cleanupContainer(cId1);
Assert.assertTrue("Container1 should be removed from Node1.",node1.getCompletedContainers().contains(cId1));
// ...and removes the AM container from the AM set.
node1.cleanupContainer(cId2);
Assert.assertFalse("Container2 should be removed from Node1.",node1.getAMContainers().contains(cId2));
}
BooleanVerifierNullVerifierHybridVerifier
/**
 * Verify that build-time version substitution populated YarnVersionInfo:
 * no field may be left at its "Unknown" placeholder or null, and the
 * composite build version must embed the source checksum.
 * @throws IOException
 */
@Test public void versionInfoGenerated() throws IOException {
// Placeholder values indicate the build did not run version substitution.
assertFalse("getVersion returned Unknown",YarnVersionInfo.getVersion().equals("Unknown"));
assertFalse("getUser returned Unknown",YarnVersionInfo.getUser().equals("Unknown"));
assertFalse("getSrcChecksum returned Unknown",YarnVersionInfo.getSrcChecksum().equals("Unknown"));
// These only need to be present; any non-null value passes.
assertNotNull("getUrl returned null",YarnVersionInfo.getUrl());
assertNotNull("getRevision returned null",YarnVersionInfo.getRevision());
assertNotNull("getBranch returned null",YarnVersionInfo.getBranch());
assertTrue("getBuildVersion check doesn't contain: source checksum",YarnVersionInfo.getBuildVersion().contains("source checksum"));
}
Class: org.apache.hadoop.yarn.webapp.TestWebApp
InternalCallVerifierEqualityVerifierExceptionVerifierHybridVerifier
// Binding a second web app to an already-taken fixed port must throw a
// WebAppException (declared via the @Test "expected" attribute).
// FIX: the first web app was never stopped when the expected exception
// propagated out of the second start(), leaking its listener for the rest
// of the test JVM; the stop now runs in a finally block.
@Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class) public void testCreateWithNonZeroPort(){
WebApp app=WebApps.$for(this).at(50000).start();
try {
int port=app.getListenerAddress().getPort();
assertEquals(50000,port);
// This second start() on the same fixed port is what must throw.
WebApp app2=WebApps.$for(this).at(50000).start();
// Only reached if the expected exception was NOT thrown (test then fails).
app2.stop();
}
finally {
app.stop();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Start a web app on an ephemeral port (0), then verify the same concrete
// port can be bound again explicitly after a clean stop.
// FIX: if an assertion failed between start() and stop(), the web app was
// never stopped; each started instance is now stopped in a finally block.
@Test public void testCreateWithPort(){
WebApp app=WebApps.$for(this).at(0).start();
int port;
try {
// Port 0 asks the OS for any free port; the chosen one must be positive.
port=app.getListenerAddress().getPort();
assertTrue(port > 0);
}
finally {
app.stop();
}
// Re-bind explicitly to the port we just released.
app=WebApps.$for(this).at(port).start();
try {
assertEquals(port,app.getListenerAddress().getPort());
}
finally {
app.stop();
}
}